{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "There are 4 GPU(s) available.\nWe will use the GPU: TITAN X (Pascal)\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "# If there's a GPU available...\n",
    "if torch.cuda.is_available():    \n",
    "\n",
    "    # Tell PyTorch to use the GPU.    \n",
    "    # NOTE(review): device index 1 is hardcoded here, and get_device_name(1)\n",
    "    # below must stay in sync with it -- consider a single GPU_INDEX constant.\n",
    "    device = torch.device(\"cuda:1\")\n",
    "    print('There are %d GPU(s) available.' % torch.cuda.device_count())\n",
    "\n",
    "    print('We will use the GPU:', torch.cuda.get_device_name(1))\n",
    "\n",
    "# If not...\n",
    "else:\n",
    "    print('No GPU available, using the CPU instead.')\n",
    "    device = torch.device(\"cpu\")\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install transformers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Parsing the dataset.tsv file ....\n     DONE.\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# Load the pre-balanced train/test splits (tab-separated).\n",
    "print('Parsing the dataset.tsv file ....')\n",
    "# comments = pd.read_csv('./data/attack_annotated_comments.tsv', sep = '\\t', index_col = 0)\n",
    "train = pd.read_csv('./data/balanced_data/train.tsv', sep = '\\t')\n",
    "test = pd.read_csv('./data/balanced_data/test.tsv', sep = '\\t')\n",
    "\n",
    "print('     DONE.')"
   ]
  },
  {
   "source": [
    "# Recompute the whitespace-token length of every article.\n",
    "# BUG FIX: the original loop did `train.iloc[i].len = ...`, which assigns an\n",
    "# attribute on a temporary Series copy and never modifies the DataFrame.\n",
    "# A vectorized column assignment actually stores the lengths.\n",
    "train['len'] = train['raw_text'].str.split().str.len()\n",
    "\n",
    "train.head()"
   ],
   "cell_type": "code",
   "metadata": {},
   "execution_count": 32,
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "   story_id                                           raw_text  label  len\n",
       "0   6772845  kiev, january 21 (ria novosti) - former ukrain...      1  160\n",
       "1  35455458  the ukrainian government has called for local ...      0   56\n",
       "2  38817309  investigators probing the shooting down of mal...      0  343\n",
       "3   7765503  kharkov, april 21 (ria novosti) - russia and u...      1  227\n",
       "4  34816385  moscow, june 27 (ria novosti) - osce monitors ...      1  250"
      ],
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>story_id</th>\n      <th>raw_text</th>\n      <th>label</th>\n      <th>len</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>6772845</td>\n      <td>kiev, january 21 (ria novosti) - former ukrain...</td>\n      <td>1</td>\n      <td>160</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>35455458</td>\n      <td>the ukrainian government has called for local ...</td>\n      <td>0</td>\n      <td>56</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>38817309</td>\n      <td>investigators probing the shooting down of mal...</td>\n      <td>0</td>\n      <td>343</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>7765503</td>\n      <td>kharkov, april 21 (ria novosti) - russia and u...</td>\n      <td>1</td>\n      <td>227</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>34816385</td>\n      <td>moscow, june 27 (ria novosti) - osce monitors ...</td>\n      <td>1</td>\n      <td>250</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "metadata": {},
     "execution_count": 32
    }
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_split(text1):\n",
    "    \"\"\"Split `text1` into overlapping word chunks.\n",
    "\n",
    "    Each chunk holds at most `split_size` words and consecutive chunks\n",
    "    overlap by `overlap_size` words. Returns a list of chunk strings.\n",
    "    \"\"\"\n",
    "    split_size = 200\n",
    "    overlap_size = 50\n",
    "    stride = split_size - overlap_size\n",
    "    words = text1.split()\n",
    "    # BUG FIX: the original used n = len(words)//stride, which silently\n",
    "    # dropped trailing words whenever len(words) % stride exceeded the\n",
    "    # overlap (e.g. a 250-word text lost its last 50 words). Take enough\n",
    "    # chunks to cover every word; ceil via negated floor division.\n",
    "    n = max(1, -(-max(len(words) - overlap_size, 1) // stride))\n",
    "    return [\" \".join(words[w * stride : w * stride + split_size]) for w in range(n)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "   story_id                                           raw_text  label  len  \\\n",
       "0   6772845  kiev, january 21 (ria novosti) - former ukrain...      1  160   \n",
       "1  35455458  the ukrainian government has called for local ...      0   56   \n",
       "2  38817309  investigators probing the shooting down of mal...      0  343   \n",
       "3   7765503  kharkov, april 21 (ria novosti) - russia and u...      1  227   \n",
       "4  34816385  moscow, june 27 (ria novosti) - osce monitors ...      1  250   \n",
       "\n",
       "                                          text_split  \n",
       "0  [kiev, january 21 (ria novosti) - former ukrai...  \n",
       "1  [the ukrainian government has called for local...  \n",
       "2  [investigators probing the shooting down of ma...  \n",
       "3  [kharkov, april 21 (ria novosti) - russia and ...  \n",
       "4  [moscow, june 27 (ria novosti) - osce monitors...  "
      ],
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>story_id</th>\n      <th>raw_text</th>\n      <th>label</th>\n      <th>len</th>\n      <th>text_split</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>6772845</td>\n      <td>kiev, january 21 (ria novosti) - former ukrain...</td>\n      <td>1</td>\n      <td>160</td>\n      <td>[kiev, january 21 (ria novosti) - former ukrai...</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>35455458</td>\n      <td>the ukrainian government has called for local ...</td>\n      <td>0</td>\n      <td>56</td>\n      <td>[the ukrainian government has called for local...</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>38817309</td>\n      <td>investigators probing the shooting down of mal...</td>\n      <td>0</td>\n      <td>343</td>\n      <td>[investigators probing the shooting down of ma...</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>7765503</td>\n      <td>kharkov, april 21 (ria novosti) - russia and u...</td>\n      <td>1</td>\n      <td>227</td>\n      <td>[kharkov, april 21 (ria novosti) - russia and ...</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>34816385</td>\n      <td>moscow, june 27 (ria novosti) - osce monitors ...</td>\n      <td>1</td>\n      <td>250</td>\n      <td>[moscow, june 27 (ria novosti) - osce monitors...</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "metadata": {},
     "execution_count": 34
    }
   ],
   "source": [
    "# Apply the chunker to every article; each row gets a list of chunk strings.\n",
    "train['text_split'] = train['raw_text'].apply(get_split)\n",
    "train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "output_type": "display_data",
     "data": {
      "text/plain": "HBox(children=(FloatProgress(value=0.0, max=8390.0), HTML(value='')))",
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "c50328c2ecb14532a8dc683ebf2884e6"
      }
     },
     "metadata": {}
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\nlength of train_split_v is: 19057\n"
     ]
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "   story_id   chunk_num                                           raw_text  \\\n",
       "0   6772845   6772845_0  kiev, january 21 (ria novosti) - former ukrain...   \n",
       "1  35455458  35455458_0  the ukrainian government has called for local ...   \n",
       "2  38817309  38817309_0  investigators probing the shooting down of mal...   \n",
       "3  38817309  38817309_1  investigators probing the shooting down of mal...   \n",
       "4   7765503   7765503_0  kharkov, april 21 (ria novosti) - russia and u...   \n",
       "\n",
       "                                          text_chunk  label  \n",
       "0  kiev, january 21 (ria novosti) - former ukrain...      1  \n",
       "1  the ukrainian government has called for local ...      0  \n",
       "2  investigators probing the shooting down of mal...      0  \n",
       "3  at ukraine's military. members of the joint in...      0  \n",
       "4  kharkov, april 21 (ria novosti) - russia and u...      1  "
      ],
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>story_id</th>\n      <th>chunk_num</th>\n      <th>raw_text</th>\n      <th>text_chunk</th>\n      <th>label</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>6772845</td>\n      <td>6772845_0</td>\n      <td>kiev, january 21 (ria novosti) - former ukrain...</td>\n      <td>kiev, january 21 (ria novosti) - former ukrain...</td>\n      <td>1</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>35455458</td>\n      <td>35455458_0</td>\n      <td>the ukrainian government has called for local ...</td>\n      <td>the ukrainian government has called for local ...</td>\n      <td>0</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>38817309</td>\n      <td>38817309_0</td>\n      <td>investigators probing the shooting down of mal...</td>\n      <td>investigators probing the shooting down of mal...</td>\n      <td>0</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>38817309</td>\n      <td>38817309_1</td>\n      <td>investigators probing the shooting down of mal...</td>\n      <td>at ukraine's military. members of the joint in...</td>\n      <td>0</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>7765503</td>\n      <td>7765503_0</td>\n      <td>kharkov, april 21 (ria novosti) - russia and u...</td>\n      <td>kharkov, april 21 (ria novosti) - russia and u...</td>\n      <td>1</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "metadata": {},
     "execution_count": 35
    }
   ],
   "source": [
    "# Expand each article into one row per text chunk.\n",
    "\n",
    "# tqdm.auto picks the right frontend; tqdm.tqdm_notebook is deprecated.\n",
    "from tqdm.auto import tqdm\n",
    "\n",
    "# Build a list of records and construct the DataFrame once at the end.\n",
    "# itertuples avoids the repeated O(n) `train.iloc[i]` positional lookups\n",
    "# the original loop performed on every access.\n",
    "rows = []\n",
    "for row in tqdm(train.itertuples(index=False), total=len(train)):\n",
    "    for j, chunk in enumerate(row.text_split):\n",
    "        rows.append({\n",
    "            'story_id': row.story_id,\n",
    "            'chunk_num': str(row.story_id) + '_' + str(j),\n",
    "            'raw_text': row.raw_text,\n",
    "            'text_chunk': chunk,\n",
    "            'label': row.label,\n",
    "        })\n",
    "\n",
    "train_split_v = pd.DataFrame(rows)\n",
    "print('length of train_split_v is:', len(train_split_v))\n",
    "train_split_v.head()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "length of train_split_v is: 19057\n"
     ]
    }
   ],
   "source": [
    "print('length of train_split_v is:', len(train_split_v))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import textwrap\n",
    "# import random \n",
    "\n",
    "# # wrap text to 80 characters.\n",
    "\n",
    "# wrapper = textwrap.TextWrapper(width = 100)\n",
    "\n",
    "# # filter to just \"attack\" comments.\n",
    "\n",
    "# examples = train.query('label')['raw_text']\n",
    "\n",
    "# # Randomly choose some examples\n",
    "\n",
    "# for i in range(10):\n",
    "#     j = random.choice(examples.index)\n",
    "    \n",
    "#     print('')\n",
    "#     print(wrapper.fill(examples[j]))\n",
    "#     print('')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "loading BERT tokenizer....\n",
      "    DONE.\n"
     ]
    }
   ],
   "source": [
    "from transformers import BertTokenizer\n",
    "\n",
    "# Load the BERT tokenizer; do_lower_case matches the uncased checkpoint.\n",
    "print('loading BERT tokenizer....')\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case = True)\n",
    "print('    DONE.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Retrieve the text from the first comment.\n",
    "# text = train.iloc[10].raw_text\n",
    "\n",
    "# tokens = tokenizer.tokenize(text)\n",
    "\n",
    "# # Print the original text\n",
    "# print('comment 0 (not an attack) contains {:,} wordpiece tokens.'.format(len(tokens)))\n",
    "# print('\\nOriginal comment text:\\n')\n",
    "# print(wrapper.fill(text))\n",
    "\n",
    "\n",
    "# # print out the list of tokens\n",
    "# print('**** First 512 tokens:*****\\n')\n",
    "# print(wrapper.fill(str(' '.join(tokens[0:512]))))\n",
    "\n",
    "# print('')\n",
    "\n",
    "# print('\\n****** Remaining {:,} tokens: *****\\n'.format(len(tokens) - 512))\n",
    "# print(wrapper.fill(str(' '.join(tokens[512:]))))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # First truncate the text to remove the last 79 tokens (which begin with the words \"are made in\"). \n",
    "# last_char = text.find('are made in')\n",
    "\n",
    "# # Truncate the text to only what fits in the 512 tokens.\n",
    "# text = text[0:last_char]\n",
    "\n",
    "# # Estimate the number of words in the comment by spliting it on whitespace.\n",
    "# # first remove all double spaces.\n",
    "# text = text.replace('  ', ' ')\n",
    "# num_words = len(text.split(' '))\n",
    "# print('Comment contains ~{:,} words.'.format(num_words))\n",
    "\n",
    "# # Estimate the number of sentences by counting up the periods.\n",
    "# num_sens = text.count('. ')\n",
    "# print('Comment contains ~{:,} sentences.'.format(num_sens))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Tokenizing comments.....\n"
     ]
    },
    {
     "output_type": "display_data",
     "data": {
      "text/plain": "HBox(children=(FloatProgress(value=0.0, max=19057.0), HTML(value='')))",
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "4f90a40d2a894d4aacf484a9f428bb9c"
      }
     },
     "metadata": {}
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      " Read 0 texts.\n",
      "\n",
      "DONE\n",
      "    19,057 comments\n",
      "   Min length: 18 tokens\n",
      "   Max length: 395 tokens\n",
      "Median length: 251.0 tokens\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "\n",
    "# Tokenize all the sentences and map the tokens to their word IDs.\n",
    "\n",
    "input_ids = []\n",
    "\n",
    "# Record the length of each encoded sequence (no truncation happens here;\n",
    "# padding/truncation to 512 is done in a later cell).\n",
    "\n",
    "lengths = []\n",
    "\n",
    "print('Tokenizing comments.....')\n",
    "\n",
    "# For every text chunk....\n",
    "for text in tqdm(train_split_v.text_chunk):\n",
    "    # Report progress. NOTE(review): with 19,057 chunks and a 20,000 step,\n",
    "    # this only ever fires once (at 0) -- tqdm already shows progress.\n",
    "    if ((len(input_ids) % 20000) == 0):\n",
    "        print(' Read {:,} texts.'.format(len(input_ids)))\n",
    "        \n",
    "    # encode will: \n",
    "    # (1) Tokenize the sentence\n",
    "    # (2) Prepend the \"[CLS]\" token to the start.\n",
    "    # (3) Append the \"[SEP]\" token to the end.\n",
    "    # (4) Map tokens to their IDs. \n",
    "    encoded_text = tokenizer.encode(\n",
    "        text,                          # sentence to encode.\n",
    "        add_special_tokens = True,   # Add '[CLS]' and '[SEP]'\n",
    "#             max_length = 512,            # Truncate all the sentences.\n",
    "#             return_tensors = 'pt'        # Return pytorch tensors.\n",
    "    )\n",
    "    # Add the encoded sentence to the list\n",
    "    input_ids.append(encoded_text)\n",
    "    \n",
    "    # Record the (untruncated) length.\n",
    "    lengths.append(len(encoded_text))\n",
    "    \n",
    "print('DONE')\n",
    "print('{:>10,} comments'.format(len(input_ids)))\n",
    "\n",
    "print('   Min length: {:,} tokens'.format(min(lengths)))\n",
    "print('   Max length: {:,} tokens'.format(max(lengths)))\n",
    "print('Median length: {:,} tokens'.format(np.median(lengths)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "  7,380 label as 1 \n 11,677 labels as 0\n"
     ]
    }
   ],
   "source": [
    "# Also retrieve the labels.\n",
    "\n",
    "# Get the labels from the dataframe as an int (0/1) numpy array.\n",
    "\n",
    "labels = train_split_v.label.to_numpy().astype(int)\n",
    "print('{:>7,} label as 1 '.format(np.sum(labels)))\n",
    "print('{:>7,} labels as 0'.format(len(labels)-np.sum(labels)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import matplotlib.pyplot as plt\n",
    "# import seaborn as sns\n",
    "# import numpy as np\n",
    "\n",
    "# sns.set(style='darkgrid')\n",
    "\n",
    "# # Increase the plot size and font size.\n",
    "# sns.set(font_scale=1.5)\n",
    "# plt.rcParams['figure.figsize'] = (10,5)\n",
    "\n",
    "# # Truncate any comment lengths greater tahn 512.\n",
    "# lengths = [min(l, 512) for l in lengths]\n",
    "\n",
    "# # Plot the distribution of comment lengths.\n",
    "# sns.distplot(lengths, kde=False, rug=False)\n",
    "# plt.title('Comment lengths')\n",
    "# plt.xlabel('Comment length')\n",
    "# plt.ylabel ('# of comments')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # count the number of sentences that had to be truncated to 512 tokens. \n",
    "# num_truncated = lengths.count(512)\n",
    "\n",
    "# # compare this to the total number of training sentences.\n",
    "# num_sentences = len(lengths)\n",
    "# prcnt = float(num_truncated)/ float(num_sentences)\n",
    "# print('{:,} of {:,} sentences ({:.1%}) in the training set are longer than 512 tokens.'.format(num_truncated, num_sentences, prcnt))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\n",
      "Padding/truncating all sentences to 512 values...\n",
      "\n",
      "Padding token: \"[PAD]\", ID: 0\n",
      "\n",
      "Done.\n"
     ]
    }
   ],
   "source": [
    "# We'll borrow the `pad_sequences` utility function to do this.\n",
    "# NOTE(review): keras.preprocessing.sequence is deprecated in recent Keras\n",
    "# releases; tf.keras.utils.pad_sequences (or manual padding) is the\n",
    "# supported path -- confirm against the pinned Keras version.\n",
    "from keras.preprocessing.sequence import pad_sequences\n",
    "\n",
    "# Set the maximum sequence length.\n",
    "MAX_LEN = 512\n",
    "\n",
    "print('\\nPadding/truncating all sentences to %d values...' % MAX_LEN)\n",
    "\n",
    "print('\\nPadding token: \"{:}\", ID: {:}'.format(tokenizer.pad_token, tokenizer.pad_token_id))\n",
    "\n",
    "# Pad our input tokens with value 0.\n",
    "# \"post\" indicates that we want to pad and truncate at the end of the sequence,\n",
    "# as opposed to the beginning.\n",
    "input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype=\"long\", \n",
    "                          value=0, truncating=\"post\", padding=\"post\")\n",
    "\n",
    "print('\\nDone.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the attention masks: 1 for real tokens, 0 for padding.\n",
    "# A token ID of 0 is always the [PAD] token here, so each mask entry is\n",
    "# simply (token_id > 0) cast to int.\n",
    "attention_masks = [[int(token_id > 0) for token_id in seq] for seq in input_ids]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use train_test_split to split our data into train and validation sets for\n",
    "# training\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# Use 90% for training and 10% for validation.\n",
    "train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels, \n",
    "                                                            random_state=2018, test_size=0.1)\n",
    "# Do the same for the masks. Reusing random_state=2018 reproduces the exact\n",
    "# same shuffle, so the masks stay row-aligned with the inputs split above.\n",
    "train_masks, validation_masks, _, _ = train_test_split(attention_masks, labels,\n",
    "                                             random_state=2018, test_size=0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convert all inputs, labels, and masks into torch tensors -- the datatype\n",
    "# the model and DataLoader require. One line per train/validation pair.\n",
    "train_inputs, validation_inputs = torch.tensor(train_inputs), torch.tensor(validation_inputs)\n",
    "train_labels, validation_labels = torch.tensor(train_labels), torch.tensor(validation_labels)\n",
    "train_masks, validation_masks = torch.tensor(train_masks), torch.tensor(validation_masks)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
    "\n",
    "# The DataLoader needs to know our batch size for training, so we specify it \n",
    "# here.\n",
    "# For fine-tuning BERT on a specific task, the authors recommend a batch size of\n",
    "# 16 or 32.\n",
    "\n",
    "batch_size = 16\n",
    "\n",
    "# Create the DataLoader for our training set (RandomSampler reshuffles\n",
    "# each epoch).\n",
    "train_data = TensorDataset(train_inputs, train_masks, train_labels)\n",
    "train_sampler = RandomSampler(train_data)\n",
    "train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)\n",
    "\n",
    "# Create the DataLoader for our validation set (fixed order, so metrics\n",
    "# are comparable across evaluations).\n",
    "validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)\n",
    "validation_sampler = SequentialSampler(validation_data)\n",
    "validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias']\n",
      "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n",
      "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.weight', 'classifier.bias']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "from transformers import BertForSequenceClassification, AdamW, BertConfig\n",
    "\n",
    "# Load BertForSequenceClassification, the pretrained BERT model with a single \n",
    "# linear classification layer on top. \n",
    "model = BertForSequenceClassification.from_pretrained(\n",
    "    \"bert-base-uncased\", # Use the 12-layer BERT model, with an uncased vocab.\n",
    "    num_labels = 2, # The number of output labels--2 for binary classification.\n",
    "                    # You can increase this for multi-class tasks.   \n",
    "    output_attentions = False, # Whether the model returns attentions weights.\n",
    "    output_hidden_states = False, # Whether the model returns all hidden-states.\n",
    ")\n",
    "\n",
    "# Tell pytorch to run this model on the GPU.\n",
    "# NOTE(review): DataParallel replicates from cuda:0 across all visible GPUs\n",
    "# by default, while an earlier cell set device = cuda:1 -- confirm these two\n",
    "# choices are meant to agree.\n",
    "if torch.cuda.is_available():\n",
    "        model.cuda()\n",
    "        model = torch.nn.DataParallel(model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "module.bert.embeddings.word_embeddings.weight torch.Size([30522, 768])\nmodule.bert.embeddings.position_embeddings.weight torch.Size([512, 768])\nmodule.bert.embeddings.token_type_embeddings.weight torch.Size([2, 768])\nmodule.bert.embeddings.LayerNorm.weight torch.Size([768])\nmodule.bert.embeddings.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.0.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.0.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.0.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.0.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.0.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.0.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.0.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.0.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.0.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.0.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.0.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.0.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.0.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.0.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.0.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.0.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.1.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.1.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.1.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.1.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.1.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.1.attention.self.value.bias 
torch.Size([768])\nmodule.bert.encoder.layer.1.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.1.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.1.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.1.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.1.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.1.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.1.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.1.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.1.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.1.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.2.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.2.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.2.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.2.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.2.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.2.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.2.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.2.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.2.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.2.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.2.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.2.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.2.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.2.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.2.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.2.output.LayerNorm.bias 
torch.Size([768])\nmodule.bert.encoder.layer.3.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.3.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.3.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.3.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.3.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.3.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.3.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.3.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.3.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.3.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.3.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.3.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.3.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.3.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.3.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.3.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.4.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.4.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.4.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.4.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.4.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.4.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.4.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.4.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.4.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.4.attention.output.LayerNorm.bias 
torch.Size([768])\nmodule.bert.encoder.layer.4.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.4.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.4.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.4.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.4.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.4.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.5.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.5.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.5.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.5.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.5.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.5.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.5.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.5.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.5.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.5.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.5.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.5.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.5.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.5.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.5.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.5.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.6.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.6.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.6.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.6.attention.self.key.bias 
torch.Size([768])\nmodule.bert.encoder.layer.6.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.6.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.6.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.6.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.6.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.6.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.6.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.6.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.6.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.6.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.6.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.6.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.7.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.7.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.7.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.7.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.7.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.7.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.7.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.7.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.7.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.7.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.7.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.7.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.7.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.7.output.dense.bias 
torch.Size([768])\nmodule.bert.encoder.layer.7.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.7.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.8.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.8.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.8.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.8.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.8.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.8.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.8.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.8.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.8.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.8.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.8.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.8.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.8.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.8.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.8.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.8.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.9.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.9.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.9.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.9.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.9.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.9.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.9.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.9.attention.output.dense.bias 
torch.Size([768])\nmodule.bert.encoder.layer.9.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.9.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.9.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.9.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.9.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.9.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.9.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.9.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.10.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.10.attention.self.query.bias torch.Size([768])\nmodule.bert.encoder.layer.10.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.10.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.10.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.10.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.10.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.10.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.10.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.10.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.10.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.10.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.10.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.10.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.10.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.10.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.11.attention.self.query.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.11.attention.self.query.bias 
torch.Size([768])\nmodule.bert.encoder.layer.11.attention.self.key.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.11.attention.self.key.bias torch.Size([768])\nmodule.bert.encoder.layer.11.attention.self.value.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.11.attention.self.value.bias torch.Size([768])\nmodule.bert.encoder.layer.11.attention.output.dense.weight torch.Size([768, 768])\nmodule.bert.encoder.layer.11.attention.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.11.attention.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.11.attention.output.LayerNorm.bias torch.Size([768])\nmodule.bert.encoder.layer.11.intermediate.dense.weight torch.Size([3072, 768])\nmodule.bert.encoder.layer.11.intermediate.dense.bias torch.Size([3072])\nmodule.bert.encoder.layer.11.output.dense.weight torch.Size([768, 3072])\nmodule.bert.encoder.layer.11.output.dense.bias torch.Size([768])\nmodule.bert.encoder.layer.11.output.LayerNorm.weight torch.Size([768])\nmodule.bert.encoder.layer.11.output.LayerNorm.bias torch.Size([768])\nmodule.bert.pooler.dense.weight torch.Size([768, 768])\nmodule.bert.pooler.dense.bias torch.Size([768])\nmodule.classifier.weight torch.Size([2, 768])\nmodule.classifier.bias torch.Size([2])\n----\n"
     ]
    }
   ],
   "source": [
    "# AdamW here is the huggingface implementation (the 'W' is the weight-decay\n",
    "# fix), as opposed to torch.optim's version.\n",
    "\n",
    "# Collect the classification-head parameters. The model is wrapped in\n",
    "# nn.DataParallel, so every parameter name carries a 'module.' prefix\n",
    "# (see the printout above) -- match with endswith(), not exact equality.\n",
    "optimizable_params = []\n",
    "for name, p in model.named_parameters():\n",
    "    if p.requires_grad:\n",
    "        print(name, p.shape)\n",
    "        if name.endswith('classifier.weight') or name.endswith('classifier.bias'):\n",
    "            optimizable_params.append(p)\n",
    "print(\"----\")\n",
    "\n",
    "# NOTE(review): the optimizer currently updates ALL parameters; pass\n",
    "# optimizable_params instead to fine-tune only the classification head.\n",
    "optimizer = AdamW(model.parameters(),\n",
    "                  lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5\n",
    "                  eps = 1e-8 # args.adam_epsilon  - default is 1e-8.\n",
    "                )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import get_linear_schedule_with_warmup\n",
    "\n",
    "# Number of passes over the training set (the BERT authors suggest 2-4;\n",
    "# we deliberately run longer).\n",
    "epochs = 20\n",
    "\n",
    "# The scheduler takes one step per batch, every epoch, decaying the\n",
    "# learning rate linearly to zero over the whole run.\n",
    "total_steps = epochs * len(train_dataloader)\n",
    "\n",
    "scheduler = get_linear_schedule_with_warmup(\n",
    "    optimizer,\n",
    "    num_warmup_steps=0,  # default value in run_glue.py: no warmup\n",
    "    num_training_steps=total_steps,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def flat_accuracy(preds, labels):\n",
    "    \"\"\"Fraction of rows where the argmax of ``preds`` matches ``labels``.\n",
    "\n",
    "    preds:  2-D array of per-class scores, one row per example.\n",
    "    labels: array of integer class ids (flattened before comparison).\n",
    "    \"\"\"\n",
    "    predicted = np.argmax(preds, axis=1).flatten()\n",
    "    actual = labels.flatten()\n",
    "    return (predicted == actual).sum() / len(actual)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import datetime\n",
    "\n",
    "def format_time(elapsed):\n",
    "    \"\"\"Render a duration given in seconds as an hh:mm:ss string.\"\"\"\n",
    "    # timedelta's str() produces the hh:mm:ss form once fractional\n",
    "    # seconds are rounded away.\n",
    "    whole_seconds = int(round(elapsed))\n",
    "    return str(datetime.timedelta(seconds=whole_seconds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_save():\n",
    "    \"\"\"Persist the current model and tokenizer to ./model_save/.\n",
    "\n",
    "    Saves with save_pretrained(), so the checkpoint can later be reloaded\n",
    "    via from_pretrained(). Reads the kernel globals `model` and `tokenizer`.\n",
    "    \"\"\"\n",
    "    import os\n",
    "\n",
    "    output_dir = './model_save/'\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    print(\"Saving model to %s\" % output_dir)\n",
    "\n",
    "    # DataParallel/distributed wrappers hide the real model in `.module`;\n",
    "    # unwrap before saving so the checkpoint has clean parameter names.\n",
    "    model_to_save = model.module if hasattr(model, 'module') else model\n",
    "    model_to_save.save_pretrained(output_dir)\n",
    "    tokenizer.save_pretrained(output_dir)\n",
    "    return"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "output_type": "error",
     "ename": "AttributeError",
     "evalue": "module 'torch.cuda' has no attribute 'empty'",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-59-df473e1e0ceb>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mempty\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m: module 'torch.cuda' has no attribute 'empty'"
     ]
    }
   ],
   "source": [
    "# Release cached, unused GPU memory back to the driver.\n",
    "# (torch.cuda has no `empty()` -- the output above shows the AttributeError;\n",
    "# the correct API is empty_cache().)\n",
    "torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\n======== Epoch 1 / 20 ========\nTraining...\n"
     ]
    },
    {
     "output_type": "error",
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-60-d3e912af68bf>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m    107\u001b[0m         \u001b[0;31m# Clip the norm of the gradients to 1.0.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    108\u001b[0m         \u001b[0;31m# This is to help prevent the \"exploding gradients\" problem.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 109\u001b[0;31m         \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclip_grad_norm_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1.0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    110\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    111\u001b[0m         \u001b[0;31m# Update parameters and take a step using the computed gradient.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/arnav/CIPS/venv/lib/python3.7/site-packages/torch/nn/utils/clip_grad.py\u001b[0m in \u001b[0;36mclip_grad_norm_\u001b[0;34m(parameters, max_norm, norm_type)\u001b[0m\n\u001b[1;32m     33\u001b[0m         \u001b[0mtotal_norm\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnorm_type\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mp\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnorm_type\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     34\u001b[0m     \u001b[0mclip_coef\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmax_norm\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtotal_norm\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1e-6\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m     \u001b[0;32mif\u001b[0m \u001b[0mclip_coef\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     36\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mp\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     37\u001b[0m             
\u001b[0mp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmul_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mclip_coef\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import random\n",
    "import os\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "\n",
    "# Fine-tuning loop, based on the huggingface `run_glue.py` script:\n",
    "# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128\n",
    "\n",
    "# Seed every RNG in play so the run is reproducible.\n",
    "seed_val = 42\n",
    "random.seed(seed_val)\n",
    "np.random.seed(seed_val)\n",
    "torch.manual_seed(seed_val)\n",
    "torch.cuda.manual_seed_all(seed_val)\n",
    "\n",
    "# Average training loss per epoch, for plotting the learning curve.\n",
    "loss_values = []\n",
    "\n",
    "log_dir = './tensorboard_log/'\n",
    "if not os.path.exists(log_dir):\n",
    "    os.makedirs(log_dir)\n",
    "writer = SummaryWriter(log_dir=log_dir)  # currently unused; kept for ad-hoc logging\n",
    "\n",
    "# Best validation accuracy seen so far. Initialized ONCE, outside the epoch\n",
    "# loop, so a checkpoint is only written when we beat the all-time best\n",
    "# (previously this was reset to 0.65 at the top of every epoch, causing\n",
    "# spurious re-saves each epoch).\n",
    "max_accuracy = 0.65\n",
    "\n",
    "# For each epoch...\n",
    "for epoch_i in range(0, epochs):\n",
    "\n",
    "    # ========================================\n",
    "    #               Training\n",
    "    # ========================================\n",
    "\n",
    "    print(\"\")\n",
    "    print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n",
    "    print('Training...')\n",
    "\n",
    "    # Measure how long the training epoch takes.\n",
    "    t0 = time.time()\n",
    "\n",
    "    # Per-batch losses for this epoch.\n",
    "    total_loss = []\n",
    "\n",
    "    # `train()` only switches the *mode* (dropout/batchnorm behaviour);\n",
    "    # it does not perform any training itself.\n",
    "    model.train()\n",
    "\n",
    "    # For each batch of training data...\n",
    "    for step, batch in enumerate(train_dataloader):\n",
    "\n",
    "        # Progress update every 200 batches.\n",
    "        if step % 200 == 0 and not step == 0:\n",
    "            elapsed = format_time(time.time() - t0)\n",
    "            print('  Batch {:>5,}  of  {:>5,}.    Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))\n",
    "\n",
    "        # `batch` holds three tensors: [0] input ids, [1] attention masks,\n",
    "        # [2] labels. Copy each to the GPU.\n",
    "        b_input_ids = batch[0].cuda()\n",
    "        b_input_mask = batch[1].cuda()\n",
    "        b_labels = batch[2].cuda()\n",
    "\n",
    "        # PyTorch accumulates gradients across backward passes, so clear\n",
    "        # them before computing new ones.\n",
    "        model.zero_grad()\n",
    "\n",
    "        # Forward pass; because `labels` is provided, the model returns the\n",
    "        # loss as the first element of the output tuple.\n",
    "        outputs = model(b_input_ids, \n",
    "                    token_type_ids=None, \n",
    "                    attention_mask=b_input_mask, \n",
    "                    labels=b_labels)\n",
    "\n",
    "        # Under DataParallel the loss comes back as one value per GPU;\n",
    "        # average them into a scalar.\n",
    "        loss = outputs[0].mean()\n",
    "\n",
    "        # Record the batch loss so we can average at epoch end.\n",
    "        total_loss.append(loss.data.cpu().numpy())\n",
    "\n",
    "        # Backward pass to compute gradients.\n",
    "        loss.backward()\n",
    "\n",
    "        # Clip the gradient norm to 1.0 to guard against exploding gradients.\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "\n",
    "        # Parameter update, then learning-rate schedule update.\n",
    "        optimizer.step()\n",
    "        scheduler.step()\n",
    "\n",
    "    # Calculate the average loss over the training data.\n",
    "    avg_train_loss = sum(total_loss) / len(train_dataloader)\n",
    "\n",
    "    # Store the loss value for plotting the learning curve.\n",
    "    loss_values.append(avg_train_loss)\n",
    "\n",
    "    print(\"\")\n",
    "    print(\"  Average training loss: {0:.2f}\".format(avg_train_loss))\n",
    "    print(\"  Training epoch took: {:}\".format(format_time(time.time() - t0)))\n",
    "\n",
    "    # ========================================\n",
    "    #               Validation\n",
    "    # ========================================\n",
    "    # After each training epoch, measure performance on the validation set.\n",
    "\n",
    "    print(\"\")\n",
    "    print(\"Running Validation...\")\n",
    "\n",
    "    t0 = time.time()\n",
    "\n",
    "    # Evaluation mode: dropout layers behave differently during evaluation.\n",
    "    model.eval()\n",
    "\n",
    "    # Tracking variables.\n",
    "    eval_accuracy = 0\n",
    "    nb_eval_steps = 0\n",
    "\n",
    "    # Evaluate data for one epoch.\n",
    "    for batch in validation_dataloader:\n",
    "\n",
    "        # NOTE(review): validation tensors stay on the CPU here (unlike the\n",
    "        # training loop, which calls .cuda()); presumably DataParallel\n",
    "        # scatters them to the GPUs itself. Confirm if the model is ever\n",
    "        # run without the DataParallel wrapper.\n",
    "        b_input_ids, b_input_mask, b_labels = batch\n",
    "\n",
    "        # No gradients needed for evaluation: saves memory and time.\n",
    "        with torch.no_grad():\n",
    "\n",
    "            # Forward pass without `labels`, so the model returns logits\n",
    "            # (pre-softmax scores) as the first output.\n",
    "            outputs = model(b_input_ids, \n",
    "                            token_type_ids=None, \n",
    "                            attention_mask=b_input_mask)\n",
    "\n",
    "        logits = outputs[0]\n",
    "\n",
    "        # Move predictions and labels to the CPU for numpy-based scoring.\n",
    "        logits = logits.detach().cpu().numpy()\n",
    "        label_ids = b_labels.to('cpu').numpy()\n",
    "\n",
    "        # Accuracy for this batch of validation sentences.\n",
    "        tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n",
    "        eval_accuracy += tmp_eval_accuracy\n",
    "\n",
    "        # Checkpoint whenever a single batch beats the best accuracy so far.\n",
    "        # NOTE(review): this keys off per-batch accuracy; the per-epoch mean\n",
    "        # (eval_accuracy / nb_eval_steps) would be a more stable criterion.\n",
    "        if tmp_eval_accuracy > max_accuracy: \n",
    "            max_accuracy = tmp_eval_accuracy\n",
    "            model_save()\n",
    "\n",
    "        # Track the number of batches.\n",
    "        nb_eval_steps += 1\n",
    "\n",
    "    # Report the final accuracy for this validation run.\n",
    "    print(\"  Accuracy: {0:.2f}\".format(eval_accuracy/nb_eval_steps))\n",
    "    print(\"  Validation took: {:}\".format(format_time(time.time() - t0)))\n",
    "\n",
    "print(\"\")\n",
    "print(\"Training complete!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(loss.data.cpu().numpy())\n",
    "# !pwd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAvoAAAGXCAYAAADCnfTMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nOzdeUCVZd7/8fdhR0U2QRQUkSO4K+644YLWaFlSbqltWtkTTfM0NeNvnHqapia1RmfGJceWSVNcUrHMCk1zNzUV11QEF3AXWWVffn84nIlRVBS44fB5/cd17vs+30NX+Dn3+V7XMRUXFxcjIiIiIiJWxcboAkREREREpOIp6IuIiIiIWCEFfRERERERK6SgLyIiIiJihRT0RURERESskIK+iIiIiIgVUtAXEZHb+uCDDwgODubKlSv3dH5ubi7BwcG8+eabFVxZ+SxZsoTg4GBiY2MNrUNEpKrYGV2AiIjcWXBw8F0fu2HDBvz8/CqxGhERqQkU9EVEaoDp06eX+nnv3r0sW7aMUaNG0blz51KPeXh4VOhz/+Y3v+Hll1/G0dHxns53dHTk4MGD2NraVmhdIiJyewr6IiI1wCOPPFLq58LCQpYtW0bHjh1veqwsxcXFZGdnU6dOnXI9t52dHXZ29/fPxb2+SRARkXunHn0RESu0ZcsWgoOD+frrr1mwYAEPPvgg7dq1Y9GiRQDs27eP3/3udwwePJgOHTrQqVMnxo4dyw8//HDTtW7Vo18ylpiYyLRp0+jTpw/t2rVj+PDhbN++vdT5t+rR/+XYnj17GDNmDB06dKBHjx68+eabZGdn31THjh07GDFiBO3ataN3795MmzaNo0ePEhwczPz58+/5d3X16lXefPNN+vbtS9u2benfvz/vvPMOaWlppY7Lyspi5syZPPDAA7Rv356uXbvy8MMPM3PmzFLHff/994wZM4bu3bvTvn17+vfvz69//WsSExPvuUYRkXuhO/oiIlbso48+IiMjg8ceewxPT0+aNGkCwHfffcfZs2cZMmQIjRs35tq1a0RHRzNp0iRmzZrF4MGD7+r6v/3tb3F0dGTixInk5uby2Wef8eKLL7J+/XoaNmx4x/MPHTpETEwMjz/+OMOGDWPnzp0sW7YMBwcH/vjHP1qO27lzJ8899xweHh688MIL1KtXj7Vr17Jr1657+8X8W2pqKqNGjeL8+fOMGDGCli1bcujQIRYtWsSuXbtYvnw5zs7OALzxxhusXbuW4cOH07FjR/Lz8zl9+jQ//vij5Xrbtm0jMjKS1q1bM2nSJOrVq8elS5fYvn07SUlJlt+/iEhVUNAXEbFily9f5ttvv8XNza3U+G9+85ubWnjGjx/PsGHD+PDDD+866Dds2JB//OMfmEwmAMsnA1988QWRkZF3PP/48eOsWLGC1q1bAzBmzBieeuopli1bxu9+9zscHBwAeO+997C3t2f58uU0atQIgCeeeILRo0ffVZ1lmTdvHklJSbz77rs8/vjjlvEWLVowbdo0yxuX4uJiNm7cSHh4OO+9916Z1/v+++8BWLBgAS4uLpbxu/ldiIhUNLXuiIhYsccee+ymkA+UCvnZ2dmkpKSQm5tLt27d+Pnnn8nLy7ur6z/11FOWkA/QuXNn7O3tOX369F2d37VrV0vIL9GjRw/y8vK4cOECAOfOneP48eM88MADlpAP4ODgwJNPPnlXz1OWkk8eIiIiSo2PGzcOFxcX1q9fD4DJZKJu3bocP36c+Pj4Mq/n4uJCcXExMTExFBYW3ldtIiL3S3f0RUSsWLNmzW45fvnyZWbOnMkPP/xASkrKTY9nZGTg6el5x+v/dyuKyWTC1dWV1NTUu6rvVq0sJW9MUlNT8ff3JykpCYCAgICbjr3V2N0qLi7m/Pnz9OjRAxub0ve9HBwcaNq0qeW5AaZMmcIf/vAHhgwZgr+/P927d2fAgAH069fP8mbn
qaeeYtOmTUyZMoWpU6fSpUsX+vTpw5AhQ3B3d7/nWkVE7oWCvoiIFSvpL/+lwsJCnn76aZKSknjyySdp06YNLi4u2NjYsHTpUmJiYigqKrqr6/93QC5RXFx8X+f/8hp3e63yKu91f/WrX9G9e3e2bNnC7t272bZtG8uXLyc0NJSPP/4YOzs7GjRoQHR0NHv27GHHjh3s2bOHd955h3/84x988skntG3btlJei4jIrSjoi4jUMocPHyY+Pp5XX32VF154odRjJbvyVCclX/516tSpmx671djdsrGxwdfXl4SEBIqKikq96cjLy+Ps2bM0bdq01DkeHh48+uijPProoxQXF/OXv/yFhQsXsmXLFgYMGADc2I40NDSU0NBQ4Mbv+/HHH+ef//wns2bNuud6RUTKSz36IiK1TEmg/e872keOHGHz5s1GlHRbfn5+BAUFERMTY+nbhxthfOHChfd17fDwcC5evMjq1atLjUdFRZGRkcGgQYMAyM/PJzMzs9QxJpOJVq1aAVi24rx27dpNz2E2m3FwcLjrdiYRkYqiO/oiIrVMcHAwzZo148MPPyQ9PZ1mzZoRHx/P8uXLCQ4O5siRI0aXeJPJkyfz3HPPMXLkSEaPHk3dunVZu3ZtqYXA92LSpEmsW7eOP/7xjxw4cIDg4GAOHz7MqlWrCAoK4umnnwZurBcIDw8nPDyc4OBgPDw8SExMZMmSJbi7uxMWFgbA7373O9LT0wkNDcXX15esrCy+/vprcnNzefTRR+/31yAiUi4K+iIitYyDgwMfffQR06dPZ+XKleTm5hIUFMSMGTPYu3dvtQz6vXr14p///Cd/+9vfmDdvHq6urjz00EOEh4czduxYnJyc7um6bm5uLFu2jFmzZrFhwwZWrlyJp6cn48aN4+WXX7ascXBxcWHcuHHs3LmTrVu3kp2djZeXF4MHD+aFF17Aw8MDgIiICL788ktWrVpFSkoKLi4utGjRgrlz5zJw4MAK+32IiNwNU3FlrXISERGpZF999RWvv/46c+bMITw83OhyRESqFfXoi4hItVdUVHTT3v55eXksWLAABwcHOnfubFBlIiLVl1p3RESk2svMzGTIkCE8/PDDNGvWjGvXrrF27Vri4uKIjIzUHvUiIregoC8iItWek5MTvXr1Yt26dVy9ehWA5s2b8/bbbzNq1CiDqxMRqZ7Uoy8iIiIiYoXUoy8iIiIiYoUU9EVERERErJB69CtRSsp1ioqqtjPK07MeycmZdz5QaiXNDymL5oaURXNDyqK5YTwbGxPu7nXLfFxBvxIVFRVXedAveV6Rsmh+SFk0N6QsmhtSFs2N6k2tOyIiIiIiVkhBX0RERETECinoi4iIiIhYIQV9ERERERErpKAvIiIiImKFFPRFRERERKyQgr6IiIiIiBVS0BcRERERsUIK+iIiIiIiVkjfjGsldh65yKrN8VxLz8WjviMRYYGEtvExuiwRERERMYiCvhXYeeQiC749Rl5BEQDJ6bks+PYYgMK+iIiISC2l1h0rsGpzvCXkl8grKGLV5niDKhIRERERoynoW4Hk9NxyjYuIiIiI9VPQtwKe9R3LNS4iIiIi1k9B3wpEhAXiYFf6P6WN6ca4iIiIiNROWoxrBUoW3JbsuuPkaEt2biEeLrqjLyIiIlJbKehbidA2PoS28cHLy4Wk86n88aNdLF5/gv97piu2NvrgRkRERKS2UQK0Qo72towe2IKkK9fZuO+c0eWIiIiIiAEU9K1Up6AGtA3wYPXWBNKu5xldjoiIiIhUMQV9K2UymXhiUBB5+UWs2HTS6HJEREREpIop6FsxH486PNCtKdsPXeTkuTSjyxERERGRKqSgb+Ue6umPu4sji9Ydp6io2OhyRERERKSKKOhbOScHO0YNMHP2UiabY7UwV0RERKS2UNCvBbq29KaVvzurtiSQkaWFuSIiIiK1gYJ+LVCyMDcnr5CVmxOMLkdEREREqoCCfi3h26AuAzv7sfXA
eU5dSDe6HBERERGpZAr6tcgjvQOoX9fhxsLcYi3MFREREbFmCvq1iLOjHSP7mzl1IYNtBy8YXY6IiIiIVCIF/VqmR5uGtPBzZcWmeDKz840uR0REREQqiYJ+LWMymRg7KIjrOflEb9XCXBERERFrpaBfCzVt6MKATn5s2n+OMxczjC5HRERERCqBgn4tNbxPAPWc7Vm8/oQW5oqIiIhYIQX9WqqOkz2P9wvk5Lk0dh6+aHQ5IiIiIlLBFPRrsV7tGhHYuD5fbIonK6fA6HJEREREpAIp6NdiNiYTYwcHkXE9jy+3nTK6HBERERGpQAr6tVwzn/qEhfiyYW8SSZczjS5HRERERCqIgr4Q0bc5zo62LFp/gmItzBURERGxCgr6Qj1nex7rF8iJxFR2/XzJ6HJEREREpAIo6AsAfds3xt/HheUbT5Kdq4W5IiIiIjWdgr4AYGNjYtzgIFIz81iz47TR5YiIiIjIfVLQF4vAxq70bt+I9XsSOX/1utHliIiIiMh9UNCXUh7vF4ijvS1R32throiIiEhNZnjQv379Ou+88w69e/emffv2REREsGHDhrs6t7i4mGXLlhEREUGHDh3o0qULI0eOZN++faWOi4uLY9KkSXTu3JmOHTsyduxY9uzZc9P1Vq9ezSuvvEJ4eDjBwcGMHz++Ql5jTVK/jgPD+zbn6OkU9h6/YnQ5IiIiInKP7IwuIDIykqNHj/Laa6/h5+dHdHQ0kZGRzJs3j7CwsNueO2XKFNatW8fEiRMJCQkhOzubw4cPk52dbTkmMTGRMWPG4OPjw5///GecnZ1ZvHgxzzzzDAsXLqRTp06WY7/88kuSk5Pp2rVrqWvUNv1CGrPlwHmWboyjXXNPHB1sjS5JRERERMrJ0KC/efNmduzYwezZsxk0aBAAPXr0IDExkalTp9426MfExBAdHU1UVBQhISGW8X79+pU6bv78+eTn5/Ppp5/i7e0NQJ8+fRg6dCjTp09n6dKllmM/+eQTbGxufMjxyCOPVNTLrHFsbWwYOyiIqYv38fXO0zwWFmh0SSIiIiJSToa27qxfvx4XFxcGDhxoGTOZTAwfPpyEhAROnjxZ5rmLFi2iS5cupUL+rcTGxtK6dWtLyAews7OjT58+7N+/n8uXL1vGS0K+QFATN0Lb+BCz+yyXrmUZXY6IiIiIlJOhyTYuLg6z2XxTwA4ODgbgxIkTtzwvPz+f2NhYgoODmTFjBj179qR169YMHTqU6OjoUsfm5eXh4OBw0zVKxuLi4iripVilEf0DsbO1Ier7OC3MFREREalhDA36qampuLq63jReMpaamlrmeXl5eURHR7NhwwbeeOMNPvroI4KCgpg8eTLLly+3HGs2mzl27NhNPfclC3ZTUlIq6uVYHbd6jjzaO4BDCcnEnrxqdDkiIiIiUg6GL8Y1mUzlfqyoqAiA3Nxc5s+fj6+vLwA9e/YkMTGROXPmMHLkSADGjRvHhg0b+P3vf8/rr7+Os7Mzn3/+OQcPHgQqt13H07NepV37dry8XCrsWqMebMWOo5dY9kM8YV39cbTXwtyariLnh1gXzQ0pi+aGlEVzo3ozNOi7ubnd8q59WloawC3v9peMm0wmmjdvbgn5cOONQZ8+fZg7dy7Jycl4enoSGhrKe++9x9SpUwkPDwdu3OV/5ZVXmDFjRqne/YqWnJxJUVHVtrx4eblw5UpGhV5zdH8z05fs5/Ovj/BI74AKvbZUrcqYH2IdNDekLJobUhbNDePZ2Jhue2PZ0NYds9lMfHy85Q59iZLe/KCgoFue5+TkhL+//y0fK+kl/+WnAcOHD2f79u188803rFu3jrVr11JcXIyzszNt2rSpiJdi1Vr6u9OtlTff/HiGK6m1d9tRERERkZrE0KA/aNAg0tPT2bhxY6nx1atXExAQgNlsvu25CQkJJCUlWcaKi4vZsmULTZo0wcPDo9TxdnZ2BAYG4u/vT3p6OsuXLyciIgJnZ+eKfVFWamR/MzYm
E0s3aPGyiIiISE1gaOtOWFgY3bt3Z8qUKaSmpuLn58fq1avZu3cvc+fOtRw3fvx4du/ezfHjxy1jEyZMYM2aNUycOJHIyEhcXFxYuXIlR44cYebMmZbjLl++zMKFCwkJCaFOnTokJCTw8ccf4+LiwquvvlqqnpMnT1q29MzIyKCgoIDvvvsOgHbt2pVqE6ptPOo7MaxXM77YFM/B+GTaB3oaXZKIiIiI3IahQd9kMjF37lxmzJjBzJkzSU9Px2w2M3v2bAYMGHDbc93d3Vm8eDHTp0/nT3/6Ezk5OQQFBTFnzhxLLz7cuJN/9OhRVqxYQWZmJg0bNmTIkCG8+OKL1KtXuqfp22+/Zfbs2aXGXnnlFQDee+89IiIiKuiV10yDujZh68ELRH1/glb+3bG30/cOiIiIiFRXpmJtkF5prGUx7i8dPpXMjGUHiOjbnId6Nqu055HKoYVTUhbNDSmL5oaURXPDeNV6Ma7UPG0DPOkc5MXXO06TnJZjdDkiIiIiUgYFfSm3UQNvLJJetlELc0VERESqKwV9KbcGrs4MDfXnp+NXOHL6mtHliIiIiMgtKOjLPXmwe1O83ZyJWn+CgsKiO58gIiIiIlVKQV/uib2dLWPCW3AhOYv1PyUaXY6IiIiI/BcFfblnHcwN6GhuwFfbT5OSkWt0OSIiIiLyCwr6cl9Gh7egsLCY5T+cNLoUEREREfkFBX25L95uzgzp0ZRdRy9x/GyK0eWIiIiIyL8p6Mt9+1UPfzzrO7FIC3NFREREqg0FfblvjvY3Fuaeu3KdH/adM7ocEREREUFBXypISIsGtA3wYPW2BNIytTBXRERExGgK+lIhTCYTTwwKIi+/iBWb4o0uR0RERKTWU9CXCuPjUYcHujVl++GLnExKM7ocERERkVpNQV8q1MM9m+Hu4siidccpKio2uhwRERGRWktBXyqUo4Mtowe24OzlTDbFamGuiIiIiFEU9KXCdQn2opW/O6s2J5CelWd0OSIiIiK1koK+VLiShbm5+YWs2qyFuSIiIiJGUNCXSuHboC7hXfzYeuACCefTjS5HREREpNZR0JdKM6xXAPXrOdxYmFushbkiIiIiVUlBXyqNs6MdI/ubOX0xg60HzhtdjoiIiEitoqAvlapH64YE+bmycnMCmdn5RpcjIiIiUmso6EulMplMjB0cTFZOAdFbEowuR0RERKTWUNCXStfEux4DOvmyaf85zlzMMLocERERkVpBQV+qxKN9AnCpY8+i9VqYKyIiIlIVFPSlStRxsufxfmbiz6Wz8/BFo8sRERERsXoK+lJlerbzIbBxfb744SRZOVqYKyIiIlKZFPSlytiYTIwbHExGVj6rt50yuhwRERERq6agL1XK38eFsBBfNu49R9LlTKPLEREREbFaCvpS5SL6NqeOkx2L1p+gWAtzRURERCqFgr5UuXrO9kSENedEYiq7jl4yuhwRERERq6SgL4bo274xzXxcWPbDSbJzC4wuR0RERMTqKOiLIWxsTIwdHERaZh5rtp82uhwRERERq6OgL4YJbOxKn/aNWP9TIueuXje6HBERERGroqAvhnqsXyCO9rZEaWGuiIiISIVS0BdD1a/jwPC+zfn5TAo/Hb9idDkiIiIiVkNBXwzXP8SXpt71WLohjty8QqPLEREREbEKCvpiuJKFuSkZuXy987TR5YiIiIhYBQV9qRZa+LnRs60P3+06y8VrWUaXIyIiIlLjKehLtTGiXyAO9jZEfa+FuSIiIiL3S0Ffqg3Xeo480rs5hxOuERt31ehyRERERGo0BX2pVgZ08sW3QV2WbIgjL18Lc0VERETulYK+VCt2tjaMHRTE1bQcvvnxjNHliIiIiNRYhgb969ev884779C7d2/at29PREQEGzZsuKtzi4uLWbZsGREREXTo0IEuXbowcuRI9u3bV+q4uLg4Jk2aROfOnenYsSNjx45lz549t7zm9u3bGTlyJO3btyc0NJQ333yT9PT0+36d
Uj4t/d3p1sqbb348y+XUbKPLEREREamRDA36kZGRrFmzhldeeYV//vOfmM1mIiMj2bx58x3PnTJlCu+//z6DBw9m/vz5fPDBB/Tt25fs7P8Ew8TERMaMGUNSUhJ//vOfmTlzJs7OzjzzzDM3vSHYtWsXzz//PD4+PsybN4/f//73bNy4keeff56ioqIKf+1ye6MGtMDWxsTS7+OMLkVERESkRrIz6ok3b97Mjh07mD17NoMGDQKgR48eJCYmMnXqVMLCwso8NyYmhujoaKKioggJCbGM9+vXr9Rx8+fPJz8/n08//RRvb28A+vTpw9ChQ5k+fTpLly61HPv+++/TokUL/va3v2Fjc+P9j5eXF88++yzfffcdQ4YMqaiXLnfB3cWRYb2a8cWmeA7GX6V9YAOjSxIRERGpUQy7o79+/XpcXFwYOHCgZcxkMjF8+HASEhI4efJkmecuWrSILl26lAr5txIbG0vr1q0tIR/Azs6OPn36sH//fi5fvgzApUuXOHToEI888ogl5AP06tWLhg0bEhMTc68vU+7DoK5N8PGoQ9T6OPILtDBXREREpDwMC/pxcXGYzeZSwRogODgYgBMnTtzyvPz8fGJjYwkODmbGjBn07NmT1q1bM3ToUKKjo0sdm5eXh4ODw03XKBmLi4sr9VwtWrS46digoCDLcVK1ShbmXk7N5rvdiUaXIyIiIlKjGBb0U1NTcXV1vWm8ZCw1NbXM8/Ly8oiOjmbDhg288cYbfPTRRwQFBTF58mSWL19uOdZsNnPs2LFSffuApT8/JSWl1HOVVU9ZtUjlaxPgQedgL9buOM3VNC3MFREREblbhvXow41WnfI+VrIwNjc3l/nz5+Pr6wtAz549SUxMZM6cOYwcORKAcePGsWHDBn7/+9/z+uuv4+zszOeff87BgwcBbvo0oaznvF2dt+PpWe+ezrtfXl4uhjxvZfmfER15cdpGVm8/zf97qpvR5dR41jY/pOJobkhZNDekLJob1ZthQd/Nze2Wd8rT0tKAW99dLxk3mUw0b97cEvLhRhjv06cPc+fOJTk5GU9PT0JDQ3nvvfeYOnUq4eHhwI27/K+88gozZsyw9O67ubkBt/4UIS0trcxa7iQ5OZOiouJ7OvdeeXm5cOVKRpU+Z2UzAUND/YneksCm3WdoE+BhdEk1ljXOD6kYmhtSFs0NKYvmhvFsbEy3vbFsWOuO2WwmPj7+pq0rS/rlg4KCbnmek5MT/v7+t3ysuPhGqP7lHfjhw4ezfft2vvnmG9atW8fatWspLi7G2dmZNm3aAP/pzb9VL/6JEydu2bsvVevBbk3wdnNm8foTFBRqu1MRERGROzEs6A8aNIj09HQ2btxYanz16tUEBARgNptve25CQgJJSUmWseLiYrZs2UKTJk3w8Ch9x9fOzo7AwED8/f1JT09n+fLlRERE4OzsDICPjw9t27ZlzZo1pd547Ny5k0uXLjF48OCKeMlyH+ztbHliUAsuXsti/R4tzBURERG5E8Nad8LCwujevTtTpkwhNTUVPz8/Vq9ezd69e5k7d67luPHjx7N7926OHz9uGZswYQJr1qxh4sSJREZG4uLiwsqVKzly5AgzZ860HHf58mUWLlxISEgIderUISEhgY8//hgXFxdeffXVUvW89tprTJgwgVdffZVRo0Zx6dIlPvjgAzp06MCDDz5Y+b8QuaP2gQ3oaG7AV9tP06OND+4ujkaXJCIiIlJtmYpL+l0MkJmZyYwZM4iJiSE9PR2z2cxLL71k6aeHWwd9gKSkJKZPn87OnTvJyckhKCiIF198sdS5165d47XXXuPo0aNkZmbSsGFDHnzwQV588UXq1bu5n2nLli3MmjWLY8eOUbduXcLDw3n99dfVo1+NXE7N5o8f7aJTUAMmPdLW6HJqHGufH3LvNDekLJobUhbNDePdqUff0KBv7RT0K8fqrQl8tf00vxsTQkt/d6PLqVFqw/yQe6O5IWXR3JCyaG4Yr9ou
xhW5V0N6+NPA1UkLc0VERERuQ0FfahwHe1vGDGzBuavX2bjvnNHliIiIiFRLCvpSI3Vs0YC2zT34clsCaZm5RpcjIiIiUu0o6EuNZDKZGBseRH5BEV9sije6HBEREZFqR0FfaqyGHnV4oFtTdhy+SFzSzd9qLCIiIlKbKehLjfZQaDPcXRxZtO5Ele9wJCIiIlKdKehLjeboYMvogS1IvJzJD/u1MFdERESkhIK+1Hhdgr1o5e9O9JYE0rPyjC5HREREpFpQ0Jcaz2QyMXZQELn5hazUwlwRERERQEFfrETjBnUZ1KUJWw9eIP58mtHliIiIiBhOQV+sxsO9muFaz0ELc0VERERQ0Bcr4uxox6j+Zs5czGDLwfNGlyMiIiJiKDujCxCpSN1bN2RT7HmWfn+CNdtPk5KRi2d9RyLCAglt42N0eSIiIiJVRnf0xaqYTCbaNfcgr6CYlIxcAJLTc1nw7TF2HrlocHUiIiIiVUdBX6zOplvsp59XUMSqzdqRR0RERGoPBX2xOsnpueUaFxEREbFGCvpidTzrO5ZrXERERMQaKeiL1YkIC8TBrvTUtrezISIs0KCKRERERKqedt0Rq1Oyu86qzfGWdp2m3vW0646IiIjUKgr6YpVC2/hYgv3qrQl8tf00h08l0zbA0+DKRERERKqGWnfE6g0N9aehuzOLYk6Ql19odDkiIiIiVUJBX6yevZ0t4x4I5nJqNmt3njG6HBEREZEqoaAvtUKbZh70aNOQb348w4Xk60aXIyIiIlLpFPSl1hg1oAWO9rZ8HnOc4uJio8sRERERqVQK+lJruNZ14PF+gRw7m8qOwxeNLkdERESkUinoS63St2NjAn3rs2zjSTKz840uR0RERKTSKOhLrWJjMvHkAy3JyilgxaaTRpcjIiIiUmkU9KXWaeJdj8Fdm7DlwAVOJKYaXY6IiIhIpVDQl1rpkd4BeNZ35POY4xQUFhldjoiIiEiFU9CXWsnRwZYnBgVx7up11u1JNLocERERkQqnoC+1VkgLL0JaNOCrbae4mpptdDkiIiIiFUpBX2q1sYOCMJlMLFp/Qnvri4iIiFVR0JdazaO+E4/2CeBgfDJ7j18xuhwRERGRCqOgL7VeeBc/mnrXI+r7E1Fs0TkAACAASURBVGTnFhhdjoiIiEiFKHfQP3PmDFu2bCk1duDAASZNmsTo0aNZtmxZhRUnUhVsbWwY/2AwaZl5RG9NMLocERERkQphV94TPvjgA1JTU+nbty8A165d47nnniMrKwtHR0feeustPD09CQ8Pr/BiRSpLYGNX+oX4smFvEr3aNsLfx8XokkRERETuS7nv6B8+fJiePXtafl67di2ZmZmsWrWKnTt30qFDBxYsWFChRYpUhcfCmuNSx4EF3x2jqEgLc0VERKRmK3fQv3btGt7e3paft27dSqdOnQgKCsLBwYEhQ4YQHx9foUWKVIU6TvaMHmjm9MUMfth/zuhyRERERO5LuYO+s7MzGRkZABQWFrJ37166dOliedzJyYnMzMyKq1CkCnVv1ZA2zdxZuTmelIxco8sRERERuWflDvotWrTgyy+/JCUlheXLl5OVlUWvXr0sj587dw4PD48KLVKkqphMJsY9EExBYTFLN8QZXY6IiIjIPSv3YtwJEybwP//zP5Y+/VatWpW6o799+3Zat25dcRWKVLGG7nV4qKc/q7eeondCMu2aexpdkoiIiEi5lfuOfr9+/ViwYAFPPfUUL730Ep9++ikmkwmAlJQUfHx8iIiIuKtrXb9+nXfeeYfevXvTvn17IiIi2LBhw12dW1xczLJly4iIiKBDhw506dKFkSNHsm/fvlLHnTlzhtdee41+/frRoUMHBg8ezAcffEB6enqp44qKivj444954IEHaNu2LWFhYUyfPp3s7Oy7qkesy6+6++PjUYfPY46Tl19odDkiIiIi5VbuO/oAXbt2pWvXrjeNu7u7M3v27Lu+TmRkJEePHuW1117Dz8+P
6OhoIiMjmTdvHmFhYbc9d8qUKaxbt46JEycSEhJCdnY2hw8fLhXMr127xqhRo6hbty6vvPIKjRo14vDhw8yaNYv9+/ezePFiy7HvvvsuUVFRPPvss/Ts2ZO4uDj+/ve/k5CQwLx58+76NYl1sLez4ckHgpm+ZD9rdpzmsbBAo0sSERERKZd7Cvr/raCggA0bNpCWlkb//v3x8vK64zmbN29mx44dzJ49m0GDBgHQo0cPEhMTmTp16m2DfkxMDNHR0URFRRESEmIZ79evX6njNm3aREpKCjNnziQ0NNTyHJmZmXz44YckJSXh5+fHhQsXiIqKYty4cbz++usA9OrVC1dXVyZPnsz27dtLrUOQ2qGlvzs92/rw3a6z9Gjjg2+DukaXJCIiInLXyt26M336dB577DHLz8XFxTzzzDP85je/4c033+Thhx/m7Nmzd7zO+vXrcXFxYeDAgZYxk8nE8OHDSUhI4OTJk2Weu2jRIrp06VIq5N+Knd2N9zH16tUrNe7icuPLkBwcHAA4ePAgRUVF9O/fv9RxAwYMAG68sZDaaeQAM04Otnz+3TGKi7W3voiIiNQc5Q76W7duLbX4duPGjezZs4cJEybw17/+FYD58+ff8TpxcXGYzWZsbEqXEBwcDMCJEydueV5+fj6xsbEEBwczY8YMevbsSevWrRk6dCjR0dGljh0wYAC+vr5MmzaNkydPcv36dX788Uc+++wzhg0bZvk+gPz8fOA/wb+Evb39bWsR61e/jgMj+ps5kZTGtkMXjC5HRERE5K6Vu3Xn4sWL+Pv7W37+4Ycf8PPz47XXXgNuBPg1a9bc8Tqpqak0a9bspnFXV1fL42Wdl5eXR3R0ND4+PrzxxhvUr1+fFStWMHnyZPLz8xk5ciRw407+8uXLefnllxk6dKjlGsOHD+fdd9+1/Gw2mwHYt29fqTcx+/fvB24sMpbaq3f7Rmw7dIEvfoino7kBLnUc7nySiIiIiMHKHfTz8/OxtbW1/Lxr1y7LVpsATZo04cqVK3d1rZLdesrzWFFREQC5ubnMnz8fX19fAHr27EliYiJz5syxBP309HQiIyPJzs5m5syZeHl5cfjwYebMmUNRURHTp08HoGXLlnTt2pX58+fj7+9PaGgocXFxvPXWW9ja2t70qcPd8vSsd+eDKoGXl4shz2vNfjO6E6/M2MSanWd5ZfTtW8aqO80PKYvmhpRFc0PKorlRvZU76Pv4+BAbG8uoUaOIi4sjMTGRX//615bHk5OTqVOnzh2v4+bmdsu79mlpacB/7uz/N1dXV0wmE82bN7eEfLjxxqBPnz7MnTuX5ORkPD09+eijjzhy5AibNm3C0/PGXuhdu3bFzc2NyZMnM2LECMvuQX//+9+ZPHmy5bXY29vz9NNPs2PHDktPf3klJ2dSVFS1fd1eXi5cuZJRpc9ZG9SxMzG4WxO+/fEsnVt4EtzU3eiS7onmh5RFc0PKorkhZdHcMJ6Njem2N5bLHfSHDh3K3LlzuXbtGnFxcdSrV6/UDjk///wzTZs2veN1zGYz69ato6ioqNQd85J++KCgoFue5+TkVKp16JdKFkuWfBpw9OhRGjVqZAn5Jdq2bQvAyZMnLUG/5I1BcnIyV65cwdfXF0dHRxYvXsyzzz57x9cj1m9YrwD2/HyZhTHH+dOz3bCzvbdPekRERESqQrmTygsvvMDw4cOJjY3FZDIxbdo06tevD0BGRgYbN260bGV5O4MGDSI9PZ2NGzeWGl+9ejUBAQGWvvmyzk1ISCApKckyVlxczJYtW2jSpAkeHh4AeHt7c/78+ZtaiWJjYwFo2LDhTdf29PSkZcuWuLi4EBUVRVFREY8//vgdX49YP0d7W8YNDuJCchbf7brzzlIiIiIiRir3HX0HBwf+8pe/3PKxunXrsm3bNpycnO54nbCwMLp3786UKVNITU3Fz8+P1atXs3fvXubOnWs5bvz48ezevZvjx49bxiZMmMCaNWuYOHEikZGR
uLi4sHLlSo4cOcLMmTMtx40ZM4Y1a9bw7LPP8txzz+Ht7c3BgweZN28eZrOZ3r17W45dsmQJtra2NG3alLS0NDZv3kx0dDR//vOfadSoUXl/TWKl2gc2oHOwF2t2nKZb64Z4uzkbXZKIiIjILZmKDdwcPDMzkxkzZhATE0N6ejpms5mXXnqJ8PBwyzG3CvoASUlJTJ8+nZ07d5KTk0NQUBAvvvhiqXMBDh06xJw5czhy5AhpaWk0atSI/v3788ILL+Du/p8+66ioKD7//HPOnz+PnZ0dbdu2ZdKkSXf16URZ1KNvnVIycvnDRz/Sws+V/x3R4baLyqsbzQ8pi+aGlEVzQ8qiuWG8O/Xo31PQz8rK4uOPP2b9+vWW9hk/Pz8GDx7MhAkT7moxbm2goG+91u9JZMmGOCY90oZurW5uAauuND+kLJobUhbNDSmL5obx7hT0y92jn5qayogRI5g7dy5Xr16lVatWtGrViuTkZObMmcOIESPK3ANfxFoM7OyHf0MXlmyIIyunwOhyRERERG5S7qD/j3/8g4SEBN544w22bdtGVFQUUVFRbN26lTfffJNTp04xe/bsyqhVpNqwsTHx5IPBpGfmEb0lwehyRERERG5S7qC/ceNGRowYwdixY0t9cZatrS1PPPEEjz32GN9//32FFilSHQU0qs+ATn5s3JfEqQvpRpcjIiIiUkq5g35Ju05ZWrduzdWrV++rKJGaYnjf5tSv58CC745R+O9vbRYRERGpDsod9Bs0aMDPP/9c5uM///wzDRo0uK+iRGqKOk52PBEexNlLmWzce87ockREREQsyh30+/fvz4oVK1i6dClFv7iDWVRUxLJly1i5ciUDBgyo0CJFqrMuwV60be7Bqq0JpGTkGl2OiIiICHAP22umpKQwevRozp49i4eHBwEBAQCcOnWKa9eu0bRpU5YuXVpqj/raSttr1h6XU7N54+NdtA/05KXh7Ywup0yaH1IWzQ0pi+aGlEVzw3gVvr2mu7s7K1eu5Pnnn8fNzY1Dhw5x6NAh3N3def7551m5cqVCvtQ63m7OPNyzGXuPX+HASa1REREREeNV+DfjLl26lIULF/LNN99U5GVrJN3Rr10KCot46197yM0r5J3nuuNob3vnk6qY5oeURXNDyqK5IWXR3DBehd/Rv5OUlBROnTpV0ZcVqfbsbG0YPziI5PQcvtqu/wdERETEWBUe9EVqs+Cm7vRu14h1uxNJupJpdDkiIiJSiynoi1SwEf0DcXa0Y2HMcYoqtjNORERE5K4p6ItUMJc6Dozsb+ZkUhrbDl4wuhwRERGppRT0RSpBr3Y+BDVx44sfTpKelWd0OSIiIlIL2d3NQf/617/u+oL79u2752JErIXJZOLJB4L5v093s3zjSSY+1NrokkRERKSWuaugP23atHJd1GQy3VMxItakcYO6PNi9KWt3nqFXu0a08tf3S4iIiEjVuaugv3DhwsquQ8QqPdyzGbt/vsTnMcf507PdsLdTt5yIiIhUjbsK+t26davsOkSskoO9LeMGBzNz+QG+3XWGYb0CjC5JREREagndXhSpZO2ae9K1pTdf7zjDpZQso8sRERGRWkJBX6QKjB7YAns7E4tijlOsvfVFRESkCijoi1QBdxdHIvoGcuR0Crt+vmR0OSIiIlILKOiLVJH+Ib4083Fh6YaTZOXkG12OiIiIWDkFfZEqYmNj4qkHW5KRlcfKzQlGlyMiIiJWTkFfpAr5+7gwsLMfm/afI/58mtHliIiIiBVT0BepYsP7NMfNxZHPvztOYVGR0eWIiIiIlVLQF6lizo52PBHegrOXM/n+pySjyxERERErpaAvYoBOQV60D/Rk9dZTXEvPMbocERERsUIK+iIGMJlMjBsURHFxMYvXnzC6HBEREbFCCvoiBmng5syw3gHsj7vK/rgrRpcjIiIiVkZBX8RAg7s2wderLovXnyAnr8DockRERMSKKOiLGMjO1oYnHwjmWnouX207
bXQ5UgvtPHKR1+duZ9hvv+T1udvZeeSi0SWJiEgFUdAXMVgLPzf6dmjEuj2JJF7ONLocqUV2HrnIgm+PkZyeSzGQnJ7Lgm+PKeyLiFgJBX2RauDxfmbqOtux8LtjFBUXG12O1BKrNseTV1D6uxzyCopYtTneoIpERKQiKeiLVAP1nO0ZNcBM/Pl0tsSeN7ocqSWS03PLHE/Sp0siIjWegr5INRHaxoeWTd1YsSmetOt5RpcjVu5qajY2prIff/PT3fzl873sOHyBvPzCqitMREQqjIK+SDVhMpkY/0AweQWFLNsYZ3Q5YsWupmUzfcl+bG1M2NuW/mfAwc6G8Q8EMbK/mYysPD7++md+O2c7SzfEcSH5ukEVi4jIvbAzugAR+Y9GnnX5VXd/1uw4Te92jWjdzMPoksTKJKflMD1qP1k5Bfy/8Z25kJzFqs3xXEvPxaO+IxFhgYS28QHggW5NOHYmhR9iz7NhbxLr9iTSsqkb/UJ86RTkhZ2t7hWJiFRnpuJirfyrLMnJmRQVVe2v18vLhStXMqr0OaVi5RcU8sYnuzEBb0/ohr2dbYVdW/OjdruWnsO0qH1kZhfw2uiOBDSqb3nsTnMjLTOXbYcusDn2PFfTcqhfx57e7RvTt2NjvN2cq6J8MYj+bkhZNDeMZ2NjwtOzXpmP27711ltvVV05tUt2dh5V/Taqbl1HsrLU312T2drY0MizLut/SsLGZKKlv3uFXVvzo/ZKychletR+MrPzeXVUR5o3di31+J3mhpODHUFN3BjY2Y/mjV1Jz8pj26ELbPgpifhzaTg62OLt7oyN6TaN/1Ij6e+GlEVzw3gmk4k6dRzKfFytOyLVUJsAD7q3bsg3P56hRxsffDzqGF2S1GA3Qv4+0rPy+O2ojgT+V8gvDxsbE+0DPWkf6Mm19By2HDjP1oMXmL3qEG71HOjboTF9OzTGo75TBb4CERG5F2rdqURq3ZH7kZaZyx8+2kUzHxdeG90RUwXcKdX8qH1SM3OZFrWf1MxcfjuyI2a/W4f8+5kbhUVFHDyZzA+x5ziScA1M0CGwAf1CGtM2wBOb223vI9We/m5IWTQ3jHen1h3d0RepplzrOfJ4v0A+jznOj0cuEdrWx+iSpIZJy7zRrpOakcurozqUGfLvl62NDSFBXoQEeXElNfvGXf4D54k9eRXP+k6EdWxMn/aNcK3nWCnPLyIit2Zo0L9+/TozZ87ku+++Iz09HbPZzEsvvcTAgQPveG5xcTHLly9n2bJlxMfHY29vT/PmzZk8eTKdOnWyHHfmzBlmzZrFTz/9REpKCg0bNmTw4ME8//zz1K9fv9T1vvjiC5YsWcLp06dxdHQkKCiISZMm0bNnz0p5/SJ3EtaxMdsPXWDpxjjamz2p62RvdElSQ6Rdz2P6kv2kZOTyvyM70MLPrUqe18vNmcfCAnmkdwD7466yaf85Vm1J4Mttpwhp0YB+Ib609HdXL7+ISBUwtHXnmWee4ejRo7z22mv4+fkRHR3NmjVrmDdvHmFhYbc99w9/+APr1q1j4sSJhISEkJ2dzeHDhwkJCaFXr14AXLt2jSFDhlC3bl0iIyNp1KgRhw8fZtasWbRt25bFixdbrjd79mxmzZrF6NGjGTx4MNnZ2SxYsIA9e/bw6aef3lPYV+uOVISzlzJ4+7Of6NOhEU892PK+rqX5UTuk/zvkX03L5n9HdCC46Z0XdFfm3Lh4LYvNsefYdvAC13MK8HZ3pl9HX3q188HlNovIpHrQ3w0pi+aG8apt687mzZvZsWMHs2fPZtCgQQD06NGDxMREpk6detugHxMTQ3R0NFFRUYSEhFjG+/XrV+q4TZs2kZKSwsyZMwkNDbU8R2ZmJh9++CFJSUn4+fkBsGrVKjp37syf/vQny/k9e/ake/fufPXVV7qrL4Zp2tCF8C5+rNuTSK+2jSqt/UKsQ3pWHu8v2c/V1Gx+c5chv7L5eNRh1IAWRPRtzk/HrrAp9hzLfzjJqi3x
dAn2pl+ILy38XCtkHYqIiPyHYd92sn79elxcXEq16ZhMJoYPH05CQgInT54s89xFixbRpUuXUiH/VuzsbryPqVev9DsdFxcXABwcHEodWzJewsnJCXt7+1LHiRjh0T4BeNR3ZGHMMQoKi4wuR6qpjKw8Pliynyup2bzyePsK3Zq1Itjb2RLa1of/N64zb0/oRlgHXw7EX2Xq4n288clu1v+USFZOvtFliohYDcOCflxcHGazGRub0iUEBwcDcOLEiVuel5+fT2xsLMHBwcyYMYOePXvSunVrhg4dSnR0dKljBwwYgK+vL9OmTePkyZNcv36dH3/8kc8++4xhw4bh7e1tOfbJJ59k69atfPHFF6Snp3Pp0iXeeecdiouLeeKJJyr41YuUj5ODHWPDg0i6cp31PyUaXY5UQ5nZ+by/JJZLKdn8+vH2tKrm36rs51WPsYODmPFSb575VUsc7W1Y8n0cr87ezqdrfyb+fBraFE5E5P4Y1rqTmppKs2bNbhp3dXW1PF7WeXl5eURHR+Pj48Mbb7xB/fr1WbFiBZMnTyY/P5+RI0cCN+7kL1++nJdffpmhQ4darjF8+HDefffdUtcdN24cjo6OvP322/zxj38EwNPTk08++YSWLe+vL1qkIoQEedHR3IAvt52ia0tvGrjq20jlhszsfD5Ysp+L17J45fH2tK7mIf+XHB1s6dOhMX06NObMxQw2xZ7jxyOX2HboAk2969EvxJfurRvi7KhN4kREysvQv5y368cs67GiohttC7m5ucyfPx9fX1/gRj99YmIic+bMsQT99PR0IiMjyc7OZubMmXh5eXH48GHmzJlDUVER06dPt1x39erVvPvuuzz99NP06tWL7OxsFi9ezKRJk/jkk09o165duV/f7RZHVCYvL5c7HyQ10sujQ3hp+kZWbD7FH5/tdk89zZof1iUjK493Pt/LhWtZvPFsdzq19L7zSWUwem54ebnQpV1jsnLy2bQviW93nGZhzHG+2HSSsE5N+FVoM5r7ao2KEYyeG1J9aW5Ub4YFfTc3t1vetU9LSwP+c2f/v7m63liw1bx5c0vIhxtvDPr06cPcuXNJTk7G09OTjz76iCNHjrBp0yY8PT0B6Nq1K25ubkyePJkRI0bQtWtX0tLS+L//+z9GjRrFb3/7W8s1e/fuzUMPPcT777/PwoULy/0ateuOVDQTMKxXAMt/OMm6HafoFORVrvM1P6zL9Zx8Plgay7krmURGtKeJp/M9//etbnOja4sGdDF7knA+nU37z7Fhz1m+23ma5o3rE9axMd1aNcTR3tboMmuF6jY3pPrQ3DDenXbdMaxH32w2Ex8fb7lDX6KkNz8oKOiW5zk5OeHv73/Lx0r6OUvuch49epRGjRpZQn6Jtm3bAlgW/J46dYqcnBzLeAl7e3uCg4OJj48vz0sTqVThXfzw86rH4vUnyM4tMLocMUhWTj5/tYT8drQP9LzzSTWMyWQi0NeVCQ+1ZkZkL8YMbEF2bgH/+uYYr87eTtT6E5y7et3oMkVEqi3Dgv6gQYNIT09n48aNpcZXr15NQEAAZrP5tucmJCSQlJRkGSsuLmbLli00adIED48b/ane3t6cP3+eK1eulDo/NjYWgIYNG1qOAzh48GCp4/Ly8jh69KjlOJHqwM7WhqceDCY1I5cvt50yuhwxQFZOAX9ddoDEy5n8z/B2tA9sYHRJla6ukz2DujbhnYnd+f0TIbQP9OSH/ed44+NdTF20lx+PXCS/QDtSiYj8kmGtO2FhYXTv3p0pU6aQmpqKn58fq1evZu/evcydO9dy3Pjx49m9ezfHjx+3jE2YMIE1a9YwceJEIiMjcXFxYeXKlRw5coSZM2dajhszZgxr1qzh2Wef5bnnnsPb25uDBw8yb948zGYzvXv3BqBx48aEh4cTFRWFg4ODpUd/0aJFJCYmlurlF6kOAn1dCevYmPU/JRLaxgd/H/VI1hbZuQXMWB7L2UsZ/M/wtnQ0W3/I
/yWTyURwU3eCm7ozZmALth+6wObY88xfc5R638fRu10jwjo2pqFHHaNLFRExnKHfjJuZmcmMGTOIiYkhPT0ds9nMSy+9RHh4uOWYWwV9gKSkJKZPn87OnTvJyckhKCiIF198sdS5AIcOHWLOnDkcOXKEtLQ0GjVqRP/+/XnhhRdwd//PHtM5OTksXLiQNWvWkJSUhJOTE2azmYkTJ97xW3rLoh59qUzXc/KZMv9HPF2dmTK+MzY2d16Yq/lRs5WE/NMXMnjx0bblXqNxOzV5bhQVF/Pz6RQ27T/H/rirFBUX08rfnf4hvnRs0QA7W8M+vLYKNXluSOXS3DDenXr0DQ361k5BXyrbj0cuMn/NUcYNDmJAJ787Hq/5UXNl5xYw84sDJJxL58VH29A5+N5317kVa5kbKRm5bDt4ns0HznMtPRfXug70bt+IsA6NaeCmLWnvhbXMDal4mhvGu1PQ18bEIjVY99YN2XboAis3x9M5yAvXeo5GlySVICevgL/9O+RPeqTiQ741cXdx5OFeAQwNbcahhGQ27T/HNzvP8M3OM7QL9CSsY2PaB3pia6O7/CJi/Wzfeuutt4wuwlplZ+dR1Z+X1K3rSFZWXtU+qRjGZDIR2NiV7/eeIzk9hy532ENd86Pmyc0r5G9fHCT+XDovPNKGrvexT/7tWNvcMJlM+HjUoUcbH3q3a4Sjgy0HE5LZcuAC2w5eIDevEG/3OvoirrtgbXNDKo7mhvFMJhN16jiU+bj+wonUcA096vBQqD+rt52id/tk2gZY3zaLtVVuXiF/X3GAuKRUXhhWeSHf2nm6OjG8b3Me7tWMAyevsmn/OVZvO8VX20/TwexJ/xBfWgd4sOvoJVZtjic5PRfP+o5EhAUS2sbH6PJFRO6Zgr6IFfhVD392Hr3EopgTvD2hGw76IqEaLzf/Rsg/npjKcw+1plsrbfN7v+xsbegc7E3nYG8upWSxJfY8Ww9eYH/cVeo525GdW0jhv9dVJafnsuDbYwAK+yJSY6lJUcQK2NvZ8OTgIC6nZvP1zjNGlyP3KS+/kH+sOMjxxFQmPtSaHgqaFa6hex1G9Dfz15d68cKwNuTk/Sfkl8grKGLVZn1hoojUXAr6IlaiVTMPQts05Nsfz3AhWd8WWlPl5Rcya+VBjp1JYcLQVrqbXMns7Wzo3rohBYW3XlCVnJ5bxRWJiFQcBX0RKzJqQAucHGxZ+N1xtHNuzZNfUMisVYc4ejqFZ4e2omfbRkaXVGt41i97x6pZKw9y9pK2EBSRmkdBX8SK1K/rwOP9AjmemMqOwxeNLkfKwRLyT13j6SEt6dVOIb8qRYQF4mBX+p9EezsbOgU14NjZVN761x5mrTzImYsK/CJSc2gxroiV6dOhMdsPXWTZxpN0MDegnrO90SXJHeQXFDEn+jCHE67x9K9a0qd9Y6NLqnVKWqRutetOVk4+639KYt2eRPbH7SGkRQOG9QrA38fF4KpFRG5P34xbifTNuGKUpMuZ/OmzPfRs68MzQ1pZxjU/qp/8giLmRh/iQHwyTz4YTL+OvobUoblxZ1k5+Xz/UxIxexLJzi2oNYFfc0PKorlhPH0zrkgt5Oddj8Fdm/DtrrP0ateIoCZuRpckt1BQWMSHqw/fCPkPGBfy5e7UcbJnWO8Awrv4WQK/7vCLSHWmHn0RKzWsVwCe9Z34POY4BYVFRpcj/6Uk5MeevMq4wUH0C1HIrylKAv/7L4byaO8Ajp9N5U+f7eEfK9TDLyLVi4K+iJVydLBl7OAgzl29Tszus0aXI79QUFjEvC+PsD/uKmMHBTGgk5/RJck9KAn801/syaN9AjiRqMAvItWLevQrkXr0pTqYveoQB+Ku4FLXgbTMPDx+schQql5BYRH//OoIe49fYUx4CwZ1aWJ0SYD+dlSErJwCvt+byLrdiWTlFtDR3IBHetf8lh7NDSmL5obx7tSjrzv6IlYuuKkbhcWQmplHMTe+AGjBt8fYeUTbb1a1wqIi
5q85yt7jVxg9sPqEfKkYdZzsGNbr1nf4T19MN7o8EamFFPRFrNy6W7Tt5BUUsWpzvAHV1F6FRUV85mJs+wAAIABJREFUtOYoPx27zKgBZgZ3Vci3Vr8M/MP/Hfjf/uwnBX4RqXLadUfEyiWn55Y5npdfiIO9bRVXVPsUFhXx8dc/s/vny4zsb+aBbk2NLkmqQB0nOx7uFcDAzk3YsDeRdXsSefuzn+hobsCw/9/enUdFded5H39XgYALmwiKbCIl4C6JiAviitqaaLSzp82mWaUn3TPJGZ+2M5OeJz2dmESfadQ2mmSSdLqT2FFIYkdx34KKiksUFRHD4h4EKyCCUPX8YawEoYyJyi3g8zrHE7j3d6u+Rb7n8qnLr343sQtdOvkYXaKINHO6oi/SzAX4eDrd9/yCTNI352OtqG7EiloWm83O2/88yPac09w9PIpxCQr5Lc2VwD/7mcFMTurKkeLLV/j/5x97OXZSV/hF5NZxe+mll14yuojmqrKymsb+qHPbtp5cuKDQJt/zbuPB/vwSan/wwXAPdzO/SAjH3c3Mxr0nWLOrmBJrJUH+bfBu42Fgtc3LlZC/7cBpfjmsKxMGdTG6JKd07rj1WrmbiQnzY0RcCJ4ebuw4dIY1u4r5+qSVju3b4O/t/E25kdQb4ox6w3gmk4k21/i9rVV3biGtuiOuYuuBUyzbeJRz1qp6q+6cLKlg9Y4ivtx/iks1NvpEBTA2PozYCH9MJpPBlTddNpud//3iIF/uP8XkpK7cObiL0SVdk84dja+yqoY1u4pZlVVIxcUa+kQFMCkxkshg15rSo94QZ9QbxvuxVXcU9G8hBX1xNdfqD+uFajZkH2dddjHWC5cID2rH2AHhxHcPwt1Ns/x+CpvdzrtfHGLLVye5KzGSiYmRRpf0o3TuME5lVQ1rdxWT4aKBX70hzqg3jKegbyAFfXE119Mfl2pq2XrgNKt2FHHimwr8vT0ZdXsow/t1po1Xq0aqtOmy2e28v/IQm/aeZOKQLtw1tKvRJV0XnTuM56qBX70hzqg3jKegbyAFfXE1P6U/bHY7+/PPkZFVyMGCUjxbuTG0TzDJ8WEE+rW+xZU2TZdD/mE27T3BnYO7cNfQyCYz/UnnDtfRUOCfOCSSrp2NCfzqDXFGvWE8BX0DKeiLq/m5/VF4+lsysorIOngam93O7dGBjB0QTlSI7y2osmmy2+38dVUuG3YfZ8KgCKYkdW0yIR907nBFlVU1rMsuZuV2YwO/ekOcUW8YT0HfQAr64mputD9Kv61iza4iNu4+wYWqGiwhvowdEEZct0DM5qYTam82u93OB6tzWZ99nPEDI/jlsKYV8kHnDldmdOBXb4gz6g3jKegbSEFfXM3N6o+L1TVs3neS1TuK+Ob8RYL8WpMcH0Zi72A8PVrWDbjsdjt/X32EtdnF/CIhnLuHRzW5kA86dzQFVwf+3l0DmJjYhajOt/Yva+oNcUa9YTwFfQMp6Iurudn9YbPZyc49S8aOQo4et9LWy53hcSGMvC3UZdcEv5nsdjsfrjnCml3FjB0Qxr0jLE0y5IPOHU3JlcCfkVVEeeWlWx741RvijHrDeAr6BlLQF1dzK/sj7/h5MrIKyc49i9lkIqFHR8YOCCcsyPkJqCmz2+18tDaP1TuLGBMfxn0jm27IB507mqLGCvzqDXFGvWE8BX0DKeiLq2mM/jhTVsnqHUVs2XeSqku19Ojiz9gB4fSKbN+kg/AP2e12lqzPIyOriNH9Q3lgVLcm/9p07mi6KqtqWL/7OCu3F1JeeYleXdszaUjkTfuwvHpDnFFvGE9B30AK+uJqGrM/Ki5eYuOeE6zZWURZeTUhHdoyJj6MgT070cq96d6Ay263848NR1m5vZBRt4fy4OimH/JB547m4GJ1Deuyb37gV2+IM+oN4ynoG0hBX1yNEf1RU2sj6+BpMrKKKDpTjk9bD0bdFsLwuBC823g0ai03ym6388nGo6zYVsiI20L4VXJ0
swj5oHNHc3KzA796Q5xRbxhPQd9ACvriaozsD7vdzsGCUjKyivgqvwQPdzODewczJj6MTu3bGFLTT2G321m2KZ9/bi1gRFwIvxrTfEI+6NzRHF2srmF99nFWXAn8ke2ZmBiJ5ScGfvWGOKPeMJ6CvoEU9MXVuEp/HP+mglVZhWw9cIraWjt9LR0YOyCM6DA/lwzPdrudtM3HWJ75NcP6dWbq2BjMLljnjXCV3pCb70YDv3pDnFFvGE9B30AK+uJqXK0/zldUsz67mHXZxymvvESXTt6MHRBO/9hA3MyuM48/fXM+n335NUl9g3l4XGyzC/nger0hN9/Vgb9nZHsmXUfgV2+IM+oN4ynoG0hBX1yNq/ZH9aVaMvefImNHEafPXSDAx5PR/cNI6tuZ1p7uhtb22ZZjpG85RmKfYB79RfMM+eC6vSE338Xqy6v0rNh2fYFfvSHOqDeMp6BvIAV9cTWu3h82u519eSVkZBVyuKgMLw83kvp2Jrl/GAG+Xo1ez+dfHiNt8zGG9O7EY+O7N9uQD67fG3LzNRj4h0RiCa0b+NUbl209cIplG49SYq0iwMeTKcOiGNSzk9FlGUq9YTwFfQMp6IuraUr9ceyklVU7ithx8AwA/WMDGTsgnMhgn0Z5/uWZX7NsUz6De3Xi8fHdMZubb8iHptUbcnNVVddeDvzbC/j2wiV6dvFnUmJXzp6vZNnGo5yzVtG+BQVbu92OzW6nttZOre3yv+0HT7NkXR6XamyOcR7uZh75RWyL+Jk4o/OG8RT0DaSgL66mKfZHyfmLrNlVxKa9J6isqiU6zI+xA8Loa+lwy66wf7GtgE82HGVQz45Mm9Cj2Yd8aJq9ITfX1YHfZIIfJgRnwdZms1Nrs1FTe1VArrVRa7NT893XzvbZbHW3Xfna+T47NTbbD/Zdfv7vv77yz/aD77/72n59469XgI8nrz075Gb9L2hydN4wnoK+gRT0xdU05f6orKph894TrN5ZRIm1io7t2zAmPozBvTrh2crtpj3Piu0F/GP9UQb26Mj0O1pGyIem3Rtyc1VV1/Jv87/kQlVNvX0mE3h5uDvCfW2tncb8LedmNuFmNmH+7r9ubmbHNjc3M+519plwM5u//9r0g/Fu340xm3+w76rx5u+//3DtEac1vf3vI1xytbDGoPOG8X4s6Bv7KTcRkevU2tOdMQPCGdU/lF2Hz5KRVchfMw6TtimfEXEhjLw9FN+2N3YDrpXbC/nH+qMM6B7EtDua/3QdkYZ4erg1GPLh8hX+wb061QnS7j/4um7INuH+XZA2/zBIu5kuH1Nv3/dh/fswb/5B6DYZFqhX7SikxFrV4L4//S2bKUO7Ehvh38hVifw4BX0RaVLczGYGdO9IfGwQR4rPk5FVyPLMr1mxvYCBPTsxNj6MkEDnVzecWZVVyJL1ecTHBvHEnT1canlPkcYW4OPZYLAN8PHkoeRoAyoy1pRhUby34hDVV83Rj+8eRM7Xpcz+cDfdI/yZPLRrvQ8zixjJ7aWXXnrJ6CKaq8rKahp7YlTbtp5cuFDduE8qTUZz6g+TyUSArxcJPToysEdHamrtbDtwijW7ijl64jw+bT0I9PW6riuAq3cW8dHaPPrHBPLkxJ64u7W8kN+cekNunHcbD/bnl9SZr+7hbuaB0dGEBf30N9JNXVhQOwJ8vSg4ZaWyqpYAH08eGB3NxCGRjLwtBO/WHuzKPcuaXcXkn7ASHNAGv3aeRpd9y+m8YTyTyUSbNs7/mm3oHP2Kigrmzp3LypUrsVqtWCwWZsyYwahRo370WLvdzpIlS/j44485evQorVq1omvXrsycOZPbbrvNMa6goIDU1FR27txJaWkpHTt2ZMyYMTz55JP4+Hy/ekdMTIzT50pMTOTtt9/+ya9Pc/TF1TT3/iivvMT63cdZu6sYa0U1YUHtGBMfRkKPjk7D+9pdxfxtdS63Rwfy1KSWGfKh+feG/HRXlpNsaavu/FxV1bWsyy523JAs
rlsH7hratVm/MdJ5w3gu/WHcxx57jJycHJ5//nlCQ0NJS0vj888/Z+HChQwbNuyax/7ud79j1apVTJ8+nbi4OCorK9m/fz9xcXEMGXL5E/Dnzp1j/PjxtG3blpSUFIKDg9m/fz+pqan06tWLv/3tb47H27NnT73n2LJlC6mpqbz00ks88MADP/n1KeiLq2kp/XGpxsa2nFOs2lHE8bMV+LXzYNTtoQyPC2Hf0RLHWthtvdypuFhDXLcOPHNXrxYb8qHl9Ib8dOqNn6ayqobVO4vIyCqisqqGAd2DmJQYSXBAW6NLu+nUG8Zz2Q/jbty4kczMTObNm0dycjIAAwcOpKioiFdeeeWaQT8jI4O0tDT+/ve/ExcX59g+fPjwOuM2bNhAaWkpc+fOZdCgQY7nKC8v5y9/+QvFxcWEhoYC0K9fv3rPs3DhQry8vLjjjjtu9OWKSCNq5W5maJ/OJPYO5sCxc2RkFbJ0Yz7pm/Ox203Yvru+UXGxBpMJbosObNEhX0Runtae7kwcEsmo20PJyCpk9c5idhw6w6CenZg4pAtB/m2MLlFaEMN+s61evRpvb+8603RMJhOTJ08mPz+fvLw8p8d+8MEH9O/fv07Ib4i7++X3Me3a1X2n4+3tDYCHh/M5Td988w2bN29mzJgxjvEi0rSYTCZ6dQ3g3+6P4w+PD8DdzewI+VfY7ZC+Od+gCkWkuWrr1YopSVHMfnoQYweEs/PQGX63aDvvrjhIyfmLRpcnLYRhQf/IkSNYLBbMV61scWWufG5uboPHXbp0iT179hATE8OcOXMYPHgwPXr0YMKECaSlpdUZO3LkSEJCQnj11VfJy8ujoqKCbdu28e677zJx4kSCgoKc1peWlkZNTQ2//OUvb/CViogrCAtqR9UlW4P7nC2bJyJyo7zbeHDvCAuvPj2IkbeFkLn/FP9n0VY+WHWY0m917pFby7CpO2VlZXTp0qXedl9fX8d+Z8dVV1eTlpZGp06dePHFF/Hx8eGTTz5h5syZXLp0iXvvvRe4fCV/yZIl/PrXv2bChAmOx5g8eTJ//OMfr1nfsmXLCAsLIyEh4We+QhFxNddaMlBE5FbybefJg8nRjEsIZ3nm12zcc4LN+04yIi6E8QMj8LnB+4CINMTQdfSvteyds3022+UrclVVVSxatIiQkBAABg8eTFFREfPnz3cEfavVSkpKCpWVlcydO5fAwED279/P/PnzsdlszJ49u8HnyM7OJj8/n+eee+6Gbs5xrQ9H3EqBgZpqJM615P549I6ezPvHXqou1Tq2ebZy49E7erbon8sV+hmIM+qNmycw0JuYqEAeKqngo9WHWbOziE17T3Dn0K5MHm7B+xpLJboi9YZrMyzo+/n5NXjV/vz588D3V/av5uvri8lkomvXro6QD5ffGAwdOpQFCxZQUlJCQEAAixcv5sCBA2zYsIGAgAAA4uPj8fPzY+bMmdxzzz3Ex8fXe46lS5diNpuZMmXKDb1Grbojrqal90fPcD8eHhfjWHUn4LslA3uG+7XonwuoN8Q59cat4QY8NKobI/t15rMvv+aTtUdYviWfMfHhJPcPo42X69/TVL1hPJdddcdisbBq1SpsNludefpX5uZHRzd85z0vLy8iIiIa3HdlpdArV+FzcnIIDg52hPwrevXqBUBeXl69oH/hwgVWrFjBkCFD6NRJ6wWLNDeDenbSWuAi4jKCA9ry1MSeTBgUwaebj/HplmOs2VnEuIRwRt0eipeH6wd+cV2GfRg3OTkZq9XKunXr6mxPT08nMjISi8VyzWPz8/MpLi52bLPb7WzatImwsDDat28PQFBQECdOnODs2bN1jr+yZn7Hjh3rPfbKlSupqKjQh3BFRESk0YQGtmPGlN7856PxRIX4snRjPv++cCsZWYVU/2C6ochPYdjbxGHDhpGQkMCsWbMoKysjNDSU9PR0du3axYIFCxzjpk6dSlZWFocPH3ZsmzZtGp9//jnTp08nJSUFb29v
li5dyoEDB5g7d65j3AMPPMDnn3/O448/zhNPPEFQUBD79u1j4cKFWCwWEhMT69W1dOlS/Pz8ruvuvCIiIiI3U0Qnb35zT1+OHj9P2uZ8Pl6Xx8qsQu4Y1IWkvp1p5a57fsj1M/TOuOXl5cyZM4eMjAysVisWi4UZM2YwevRox5iGgj5AcXExs2fPZuvWrVy8eJHo6GieeeaZOscCfPXVV8yfP58DBw5w/vx5goODGTFiBE899RT+/v51xhYWFpKcnMzUqVP5/e9/f8OvT3P0xdWoP8QZ9YY4o94w1uHCUtI25ZNbfJ4AH0/uHBLJ4F6dXOImf+oN4/3YHH1Dg35zp6Avrkb9Ic6oN8QZ9Ybx7HY7OV+XkrY5n/wTVoL8WjMxsQsDe3TCbP75qwPeKPWG8Vz2w7giIiIi8uNMJhM9I9vTo4s/e4+WkL45n7eWH+SfWwuYlBhJ/9ggzDewHLg0Xwr6IiIiIk2AyWSin6UDfaIC2J17lvTNx1j46QFCMwuYPDSSft063ND9f6T5UdAXERERaULMJhO3xwQR1y2QrEOn+XTzMVKXfUWXTt7cNbQrvbu2V+AXQEFfREREpEkym00M7NGJ+Nggtu4/zWdfHuP//WMvlhBfJid1pXuE/48/iDRrCvoiIiIiTZib2Uxin2AG9uzIln0n+Tzza177cDex4X5MTupKt1A/o0sUgyjoi4iIiDQD7m5mhseFMKR3JzbsOcE/txbwpw+y6dW1PZOHdiUy2MfoEqWRKeiLiIiINCOt3N1I7h9GUt/OrMsuZsW2Qv7vezvpZ+nAXUMjCe/obXSJ0kgU9EVERESaIc9WbvwiIYLh/UJYs7OIlVlFvPS/O+gfG8RdiZF07tDW6BLlFlPQFxEREWnGWnu6c+eQSEbeHkpGVhGrdxax6/AZBvboyMTESDr6tzG6RLlFFPRFREREWoC2Xq2YktSV5P6hrNxeyNpdxWzPOcOQ3p24c0gXOvi2NrpEuckU9EVERERaEO82HtwzwsKY+DD+ua2ADbuPk7n/FEn9OnPHoC74e3saXaLcJAr6IiIiIi2QbztPHhwdzbgB4SzfWsCmPSfYvPckI28LYfzACHzaehhdotwgBX0RERGRFqy9jxcPj43hFwnhfP7l16zeWcSGPccZfXsY4xLCade6ldElys+koC8iIiIiBPq15vEJ3Rk/KILPthxjxbYC1mUXMyY+jDHx4bTxUmxsakx2u91udBHNVUlJOTZb4/54AwO9OXv220Z9Tmk61B/ijHpDnFFvtFzHz5aTvuUYuw6fpa2XO2MHhDO6fyi7j3zDso1HOWetor2PJ1OGRTGoZyejy22RzGYTAQHtnO7XWzMRERERqScksB0zJvem4NS3pG/OZ9mmfP659Wtqau3Ufnchs8RaxXsrDgEo7Lsgs9EFiIiIiIjriujkzXP39GXWw7djs+EI+VdU19hYtvGoQdXJtSjoi4iIiMiPiursy6VaW4P7SqxVbN53gm/KKhu5KrkWTd0RERERkesS4ONJibWq3naTCf73i8tTeDr4ehET7kdsuD/dI/xp7+PV2GXKdxT0RUREROS6TBkWxXsrDlFd8/2VfQ93Mw+PiyGiozeHCss4VFDK3rwSvvzqFABBfq2JjfAjJtyf2HB/3ZCrESnoi4iIiMh1ufKBW2er7oQEtmPU7aHY7HaOn63gUEEphwpL2XnoLJv2ngSgY/s2dA/3IzbCn5hwf3x1Y65bRstr3kJaXlNcjfpDnFFviDPqDXHmp/SGzWan6Ew5BwtKOVxYSm5xGZVVtQAEB7QhNsKf7uH+xIT74d1Gwf96aXlNERERETGU2WwiopM3EZ28GZcQTq3NRuHpcg4VlHKwsJTMr06xPvs4AKGBbYkNv3y1PybcT3fmvQEK+iIiIiLSqNzMZiKDfYgM9uEXAyOoqbVRcOpbDhWWcqiglE17T7BmVzEmICyoHbERl+f3R4f50sZLwf96KeiLiIiIiKHc3cxEhfgS
FeLLhEFdqKm1kX/C6gj+67KPs2pHESYThHf0pnu4P7ERfnQL9aO1p+KsM/rJiIiIiIhLcXczEx3mR3SYHxOHRHKpppb8E1YOFpRyqLCMNbuKWJlViNlkokuwNzHhfnQP96dbqB+eHm5Gl+8yFPRFRERExKW1cnf7bs6+PwDVl2rJO37+8nKehaWsyipixbZC3MwmIoN9HMt5WkJ88WzVcoO/gr6IiIiINCkerdzo0aU9Pbq0B6CqupYjx8s4VHA5+H+xtZDlmQW4u5noGuzjmOMfFeJDK/eWE/wV9EVERESkSfP0cKNXZAC9IgMAqKyq4Ujxeccc/88zv+azL7/G3c2MJcSH2HB/YiP86drZB3c3s8HV3zoK+iIiIiLSrLT2dKdPVAB9oi4H/wsXL5Fb9H3w/3TLMdK3HMPD3Ywl1Pdy8A/3p0uwd7MK/gr6IiIiItKstfFqRb9uHejXrQMA5ZWXyC0qc9y5d9mmfAA8W7nRLdTXMdUnolM73MxNN/gr6IuIiIhIi9KudStuiw7ktuhAAKwXqsn97oO9hwrL+GTDUQC8PNyIDvP7bqqPH+FB3pjNpjqPtfXAKZZtPEqJtYoAH0+mDItiUM9Ojf6aGqKgLyIiIiItmk8bD/rHBtE/NgiA8xXVHP5ums/BwjL2HS0BoI2n+3fB34/YCH+Kz5bz/srDVNfYACixVvHeikMALhH2FfRFRERERH7At60HA7p3ZED3jgCUflt1OfgXlnKooIw9ed8AYALsVx1bXWNj2cajCvoiIiIiIq7O39uTgT07MfC78H7OepGDBaW8/c+DDY4vsVY1ZnlONd1PF4iIiIiIGKC9jxdDegcT4OPZ4H5n2xubgr6IiIiIyM8wZVgUHu5147SHu5kpw6IMqqguTd0REREREfkZrszD16o7IiIiIiLNzKCenVwm2F9NU3dERERERJohBX0RERERkWbI0KBfUVHByy+/TGJiIn369GHKlCmsXbv2uo612+18/PHHTJkyhb59+9K/f3/uvfdesrOz64wrKCjg+eefZ/jw4fTt25cxY8bw+uuvY7Va6z1mdXU1ixcvZsKECfTu3ZuEhASmTp3KsWPHbsrrFRERERFpLIbO0U9JSSEnJ4fnn3+e0NBQ0tLSSElJYeHChQwbNuyax86aNYtVq1Yxffp04uLiqKysZP/+/VRWVjrGnDt3jvvuu4+2bdvy3HPPERwczP79+0lNTWX37t387W9/c4ytqanhmWee4fDhwzz55JPExsby7bffsnv3bqqqXGMtVBERERGR62VY0N+4cSOZmZnMmzeP5ORkAAYOHEhRURGvvPLKNYN+RkYGaWlp/P3vfycuLs6xffjw4XXGbdiwgdLSUubOncugQYMcz1FeXs5f/vIXiouLCQ0NBeD9998nOzubzz77jLCwMMdjjBo16ma9ZBERERGRRmPY1J3Vq1fj7e1dJ0ibTCYmT55Mfn4+eXl5To/94IMP6N+/f52Q3xB398vvY9q1a1dnu7e3NwAeHh51HnPcuHF1Qr6IiIiISFNlWNA/cuQIFosFs7luCTExMQDk5uY2eNylS5fYs2cPMTExzJkzh8GDB9OjRw8mTJhAWlpanbEjR44kJCSEV199lby8PCoqKti2bRvvvvsuEydOJCgoCIATJ05w/PhxIiIi+M///E/i4+Pp1asXU6ZMYcOGDTf/xYuIiIiI3GKGBf2ysjJ8fX3rbb+yrayszOlx1dXVpKWlsXbtWl588UUWL15MdHQ0M2fOZMmSJY6x7dq1Y8mSJdTW1jJhwgRuu+02HnnkEYYMGcIrr7ziGHfmzBkAFi9eTG5uLq+++irz58/Hx8eHp59+ms2bN9/Mly4iIiIicssZ+mFck8n0k/fZbDYAqqqqWLRoESEhIQAMHjyYoqIi5s+fz7333guA1WolJSWFyspK5s6dS2BgIPv372f+/PnYbDZmz55d5zFbtWrF4sWLHVN9Bg4cyJgxY1iwYAFDhw79ya8v
IKDdjw+6BQIDvQ15Xmka1B/ijHpDnFFviDPqDddmWND38/Nr8Kr9+fPnARq82n9lu8lkomvXro6QD5ffGAwdOpQFCxZQUlJCQEAAixcv5sCBA2zYsIGAgAAA4uPj8fPzY+bMmdxzzz2O7wHi4uLqzOf39PQkISGBjIyMn/UaS0srsNnsP+vYnysgoB0lJeWN+pzSdKg/xBn1hjij3hBn1BvGM5tN+Pu3dbrfsKBvsVhYtWoVNputzjz9K3Pzo6OjGzzOy8uLiIiIBvfZ7ZdD9ZW/BuTk5BAcHOwI+Vf06tULgLy8POLj4wkPD6d169ZOH/PqzxFcr2v94G8lo/6SIE2D+kOcUW+IM+oNcUa94doMm6OfnJyM1Wpl3bp1dbanp6cTGRmJxWK55rH5+fkUFxc7ttntdjZt2kRYWBjt27cHICgoiBMnTnD27Nk6x+/ZsweAjh07ApdX5xkxYgTZ2dmUl3//zvTixYts376dPn363NiLFRERERFpZCb7lcvgjcxut/PII49w+PBhXnjhBUJDQ0lPTyc9PZ0FCxYwcuRIAKZOnUpWVhaHDx92HFtaWspdd91F69atSUlJwdvbm6VLl5KRkcHcuXMZP348APv27ePBBx8kMjKSJ554gqCgIPbt28fChQsJDg4mLS3NscTmsWPHuPvuu7FYLEyfPh2z2cy7777L7t27ee+997j99tsb/4ckIiIiIvIzGRb0AcrLy5kzZw4ZGRlYrVYsFgszZsxg9OjRjjENBX2A4uJiZs+ezdatW7l48SLR0dE888wzdY4F+Oqrr5g/fz4HDhzg/PnzBAcHM2LECJ566in8/f3rjD106BCvvfYa2dnZ2Gw2+vTpw3PPPUf//v1v3Q9BREREROQWMDToi4iIiIjIrWHYHH0REREREbl1FPRFRERERJohBX0RERERkWZIQV9EREREpBlS0BcRERERaYYU9JuBiooKXn75ZRITE+nTpw9Tpkxh7dq1RpclLmDr1q3MnDmTsWPH0reQ44e0AAAK00lEQVRvX5KSkkhJSam3XK1IamoqMTExTJo0yehSxEVs376dxx9/nP79+9O3b1/Gjx/Pxx9/bHRZYrCcnByeffZZEhMT6devH+PHj2fRokVUV1cbXZo0wN3oAuTGpaSkkJOTw/PPP09oaChpaWmkpKSwcOFChg0bZnR5YqAPP/yQsrIyHn30UaKiovjmm2946623uPvuu/nrX/9Kv379jC5RXMCRI0dYvHgxHTp0MLoUcRFpaWnMmjWLe+65h0cffZRWrVqRn5/PpUuXjC5NDHT06FHuv/9+IiMj+d3vfoe/vz/btm1j7ty55OXlMXv2bKNLlKtoHf0mbuPGjTz55JPMmzeP5ORk4PJdhx988EHKyspYsWKFwRWKkUpKSggICKizzWq1MmrUKAYOHEhqaqpBlYmrsNls3H///fTu3Zvc3FysViuffvqp0WWJgU6ePMm4ceNISUnhiSeeMLoccSGpqanMmzeP1atXEx4e7tj+wgsv8MUXX7Bnzx5atWplYIVyNU3daeJWr16Nt7c3o0aNcmwzmUxMnjyZ/Px88vLyDKxOjHZ1yAfw8fEhIiKCU6dOGVCRuJp3332XU6dO8dvf/tboUsRFfPLJJ8DlO9OL/JC7++WJIO3atauz3dvbG3d3d9zc3IwoS65BQb+JO3LkCBaLBbO57v/KmJgYAHJzc40oS1zYuXPnOHLkCN26dTO6FDFYUVERf/7zn/mP//iPer+4peXasWMHUVFRrFq1irFjx9K9e3eSkpJ4/fXXNQ+7hZs0aRJ+fn689NJLFBUVUV5ezpo1a0hLS+Oxxx6rl0XEeJqj38SVlZXRpUuXett9fX0d+0WusNvtvPjii9hsNqZNm2Z0OWIgu93O73//exITExk9erTR5YgLOXPmDGfOnOHll1/mueeew2KxsG3bNhYtWsTJkyd54403jC5RDNK5c2c+/vhjZsyYUee88fTTT/Ob3/zGwMrE
GQX9ZsBkMv2sfdLyzJ49mzVr1vCnP/2JqKgoo8sRAy1ZsoT9+/fzxRdfGF2KuBi73U5FRQVz5sxhwoQJACQkJHDx4kXeeecd/uVf/oWIiAiDqxQjHD9+nKeffprAwEDmz5+Pt7c3O3bs4M0338RkMinsuyAF/SbOz8+vwav258+fB76/si8yd+5c3nnnHWbNmsWUKVOMLkcMdO7cOV577TWeeuopWrdujdVqBaCmpgabzYbVasXT0xNPT0+DKxUj+Pn5AZCYmFhne1JSEu+88w4HDhxQ0G+h3njjDSoqKkhPT8fLywu4/CYQYP78+dx9992EhoYaWaJcRZOpmjiLxcLRo0ex2Wx1tl+Zmx8dHW1EWeJi/ud//oeFCxfywgsv8PDDDxtdjhjs9OnTfPvtt7zxxhvEx8c7/mVnZ5Obm0t8fLxWZGrBfuz3huZht1w5OTlYLBZHyL+iV69e2Gw28vPzDapMnNEV/SYuOTmZTz75hHXr1tWZL5eenk5kZCQWi8XA6sQVzJs3jwULFvDcc88xffp0o8sRFxAeHs77779fb/t///d/c+HCBV5++WU6d+5sQGXiCpKTk1myZAkbN25k4sSJju0bN27EZDLRu3dvA6sTIwUFBXHkyBEqKytp3bq1Y/vu3bsB6Nixo1GliRMK+k3csGHDSEhIYNasWZSVlREaGkp6ejq7du1iwYIFRpcnBnvnnXdITU1lxIgRDB48mD179jj2eXh40KNHDwOrE6O0bdvW8ef2H/Lx8QFocJ+0HElJSSQlJfFf//VflJaW0q1bN7Zt28b777/P/fffT0hIiNElikEefvhhZsyYwbRp03jkkUfw9vZm+/btvP322wwePNix4p+4Dt0wqxkoLy9nzpw5ZGRkYLVasVgs9T4RLy3T1KlTycrKanBfSEgI69ata+SKxJVNnTpVN8wSAC5cuEBqairLly+ntLSU4OBg7rnnHqZPn66pOy1cZmYmixYtIjc3lwsXLhASEsL48eN57LHHaNOmjdHlyVUU9EVEREREmiG9LRcRERERaYYU9EVEREREmiEFfRERERGRZkhBX0RERESkGVLQFxERERFphhT0RURERESaIQV9ERFpVqZOncrIkSONLkNExHC6M66IiPyo7du38/DDDzvd7+bmRk5OTiNWJCIiP0ZBX0RErtsdd9xBUlJSve26W6qIiOtR0BcRkevWo0cPJk2aZHQZIiJyHXQJRkREbpri4mJiYmJITU1l+fLl3HnnnfTu3Zvhw4eTmppKTU1NvWMOHTrEjBkzSEhIoHfv3owfP57FixdTW1tbb+zZs2d5+eWXGTVqFL169WLQoEE89thjfPnll/XGnj59mn/9138lPj6efv36MW3aNI4dO3ZLXreIiCvSFX0REblulZWVnDt3rt52Dw8P2rVr5/h+/fr1vPfeezz00EN06NCBdevWMW/ePE6cOMGf/vQnx7ivvvqKqVOn4u7u7hi7fv16Xn/9dQ4dOsQbb7zhGFtcXMwDDzxASUkJkyZNolevXlRWVrJ3714yMzMZMmSIY+yFCxf41a9+Rd++ffntb39LcXEx77//Ps8++yzLly/Hzc3tFv2ERERch4K+iIhct9TUVFJTU+ttHz58OG+++abj+4MHD/LJJ5/Qs2dPAH71q1+RkpLCsmXLuO++++jXrx8Af/zjH6muruajjz4iNjbWMfY3v/kNy5cv5+6772bQoEEA/OEPf+DMmTO89dZbDB06tM7z22y2Ot+XlpYybdo0nnjiCce29u3b89prr5GZmVnveBGR5khBX0RErtt9993HuHHj6m1v3759ne8HDx7sCPkAJpOJ6dOns2bNGlavXk2/fv0oKSlh9+7dJCcnO0L+lbFPP/00K1euZPXq1QwaNIiysjI2b97M0KFDGwzpV38Y2Gw211slaODAgQAUFBQo6ItIi6CgLyIi1y0iIoLBgwf/6LioqKh62ywWCwBFRUXA5ak4P9x+9fFms9kxtrCw
ELvdTo8ePa6rzqCgIDw9Pets8/PzA6CsrOy6HkNEpKnTh3FFROSmM5lMPzrGbrdf9+NdGXs9jwtccw7+T3leEZGmTEFfRERuury8PKfbwsLC6vy3obH5+fnYbDbHmIiICEwmk27KJSLyEyjoi4jITZeZmcmBAwcc39vtdt566y0ARo8eDUBAQABxcXGsX7+e3NzcOmMXLVoEQHJyMnB52k1SUhKbNm0iMzOz3vPpKr2ISH2aoy8iItctJyeHTz/9tMF9VwI8QGxsLI888ggPPfQQgYGBrF27lszMTCZNmkRcXJxj3KxZs5g6dSoPPfQQDz74IIGBgaxfv54tW7Zwxx13OFbcAXjxxRfJycnhiSee4K677qJnz55UVVWxd+9eQkJCeOGFF27dCxcRaYIU9EVE5LotX76c5cuXN7hv1apVjrnxI0eOJDIykjfffJNjx44REBDAs88+y7PPPlvnmN69e/PRRx/x5z//mQ8//JALFy4QFhbG888/z+OPP15nbFhYGEuXLmX+/Pls2rSJTz/9FB8fH2JjY7nvvvtuzQsWEWnCTHb9vVNERG6S4uJiRo0aRUpKCr/+9a+NLkdEpEXTHH0RERERkWZIQV9EREREpBlS0BcRERERaYY0R19EREREpBnSFX0RERERkWZIQV9EREREpBlS0BcRERERaYYU9EVEREREmiEFfRERERGRZkhBX0RERESkGfr/A1vlSmZzod8AAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 864x432 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "import seaborn as sns\n",
    "\n",
    "# Seaborn styling: darkgrid background with enlarged fonts.\n",
    "# (A single call is equivalent to the old pair of sns.set() calls, since the\n",
    "# second call reset the style back to the 'darkgrid' default anyway.)\n",
    "sns.set(style='darkgrid', font_scale=1.5)\n",
    "\n",
    "# Widen the default figure so the per-epoch curve is easy to read.\n",
    "plt.rcParams[\"figure.figsize\"] = (12,6)\n",
    "\n",
    "# Learning curve: one point per training epoch.\n",
    "plt.plot(loss_values, 'b-o')\n",
    "\n",
    "# Label the figure so it stands alone when skimmed.\n",
    "plt.title(\"Training loss\")\n",
    "plt.xlabel(\"Epoch\")\n",
    "plt.ylabel(\"Loss\")\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/maryam/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:10: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  # Remove the CWD from sys.path while we load stuff.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9f7c135222bb427cac52d1199e01f50c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=2098.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "length of test_split_v is: 11040\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>story_id</th>\n",
       "      <th>chunk_num</th>\n",
       "      <th>raw_text</th>\n",
       "      <th>text_chunk</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>35358787</td>\n",
       "      <td>35358787_0</td>\n",
       "      <td>brussels, september 3 (ria novosti)  the europ...</td>\n",
       "      <td>brussels, september 3 (ria novosti) the europe...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>13312382</td>\n",
       "      <td>13312382_0</td>\n",
       "      <td>german chancellor angela merkel said monday sh...</td>\n",
       "      <td>german chancellor angela merkel said monday sh...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>35312144</td>\n",
       "      <td>35312144_0</td>\n",
       "      <td>european union foreign ministers on friday cle...</td>\n",
       "      <td>european union foreign ministers on friday cle...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>35312144</td>\n",
       "      <td>35312144_1</td>\n",
       "      <td>european union foreign ministers on friday cle...</td>\n",
       "      <td>had to be sent a clear message that its incurs...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>34817284</td>\n",
       "      <td>34817284_0</td>\n",
       "      <td>kiev, june 24 (ria novosti)  ukrainian preside...</td>\n",
       "      <td>kiev, june 24 (ria novosti) ukrainian presiden...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   story_id   chunk_num                                           raw_text  \\\n",
       "0  35358787  35358787_0  brussels, september 3 (ria novosti)  the europ...   \n",
       "1  13312382  13312382_0  german chancellor angela merkel said monday sh...   \n",
       "2  35312144  35312144_0  european union foreign ministers on friday cle...   \n",
       "3  35312144  35312144_1  european union foreign ministers on friday cle...   \n",
       "4  34817284  34817284_0  kiev, june 24 (ria novosti)  ukrainian preside...   \n",
       "\n",
       "                                          text_chunk  label  \n",
       "0  brussels, september 3 (ria novosti) the europe...      1  \n",
       "1  german chancellor angela merkel said monday sh...      0  \n",
       "2  european union foreign ministers on friday cle...      0  \n",
       "3  had to be sent a clear message that its incurs...      0  \n",
       "4  kiev, june 24 (ria novosti) ukrainian presiden...      1  "
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Apply split function on the test data\n",
    "test['text_split'] = test['raw_text'].apply(get_split)\n",
    "\n",
    "# Explode each test story into one row per text chunk so every chunk can be\n",
    "# scored independently (mirrors the train-set preparation).\n",
    "from tqdm.auto import tqdm  # tqdm_notebook is deprecated; tqdm.auto picks the right frontend\n",
    "tmp = []\n",
    "\n",
    "for i in tqdm(range(len(test))):\n",
    "    for j in range(len(test.iloc[i].text_split)):\n",
    "        # Chunk id '<story_id>_<chunk index>' keeps chunks traceable to their story.\n",
    "        chunk_num = str(test.iloc[i]['story_id']) + '_' + str(j)\n",
    "        tmp.append(\n",
    "        {'story_id': test.iloc[i]['story_id'],\n",
    "            'chunk_num': chunk_num,\n",
    "            'raw_text': test.iloc[i]['raw_text'],\n",
    "            'text_chunk': test.iloc[i]['text_split'][j],\n",
    "            'label': test.iloc[i].label}\n",
    "        )\n",
    "\n",
    "# Build the frame once from the accumulated records (avoids quadratic concat).\n",
    "test_split_v = pd.DataFrame(tmp) \n",
    "# Bug fix: report the length of test_split_v (was printing len(train_split_v)).\n",
    "print('length of test_split_v is:', len(test_split_v))\n",
    "test_split_v.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE.\n",
      "\n",
      "               2,702 test comments.\n",
      "               1,186 labeled as 1\n",
      "               1,516 labeled as 0\n"
     ]
    }
   ],
   "source": [
    "# Performance On Test Set\n",
    "# Data preparation: tokenize, pad, mask, and batch the test chunks.\n",
    "\n",
    "# Tokenize all of the sentences and map the tokens to their word IDs.\n",
    "test_input_ids = []\n",
    "\n",
    "# For every sentence...\n",
    "for text in test_split_v.text_chunk:\n",
    "    \n",
    "    # Report progress (bug fix: was counting the train-stage list `input_ids`,\n",
    "    # so progress never reflected this loop).\n",
    "    if ((len(test_input_ids) % 20000) == 0):\n",
    "        print('    Read {:,} comments.'.format(len(test_input_ids)))\n",
    "        \n",
    "    # `encode` will:\n",
    "    #   (1) Tokenize the sentence.\n",
    "    #   (2) Prepend the `[CLS]` token to the start.\n",
    "    #   (3) Append the `[SEP]` token to the end.\n",
    "    #   (4) Map tokens to their IDs.\n",
    "    encoded_sent = tokenizer.encode(\n",
    "                        text,                      # Sentence to encode.\n",
    "                        add_special_tokens = True, # Add '[CLS]' and '[SEP]'\n",
    "                        max_length = MAX_LEN,       # Truncate all sentences \n",
    "                   )\n",
    "    # Add the encoded sentence to the list.\n",
    "    test_input_ids.append(encoded_sent)\n",
    "    \n",
    "print('DONE.')\n",
    "print('')\n",
    "print('{:>20,} test comments.'.format(len(test_input_ids)))\n",
    "\n",
    "\n",
    "# Also retrieve the labels as a list:\n",
    "# Get the labels from the Dataframe and convert from booleans to ints. \n",
    "\n",
    "test_labels = test_split_v['label'].to_numpy().astype(int)\n",
    "\n",
    "print('{:>20,} labeled as 1'.format(np.sum(test_labels)))\n",
    "print('{:>20,} labeled as 0'.format(len(test_labels) - np.sum(test_labels)))\n",
    "\n",
    "# Pad our input tokens so every sequence is exactly MAX_LEN long.\n",
    "test_input_ids = pad_sequences(test_input_ids, maxlen=MAX_LEN, \n",
    "                          dtype=\"long\", truncating=\"post\", padding=\"post\")\n",
    "\n",
    "# Create attention masks\n",
    "test_attention_masks = []\n",
    "\n",
    "# Create a mask of 1s for each real token followed by 0s for padding.\n",
    "for seq in test_input_ids:\n",
    "    seq_mask = [float(i>0) for i in seq]\n",
    "    test_attention_masks.append(seq_mask) \n",
    "\n",
    "# Convert to tensors.\n",
    "test_inputs = torch.tensor(test_input_ids)\n",
    "test_masks = torch.tensor(test_attention_masks)\n",
    "test_labels = torch.tensor(test_labels)\n",
    "\n",
    "# Set the batch size.  \n",
    "batch_size = 16  \n",
    "\n",
    "# Create the DataLoader. SequentialSampler keeps prediction order aligned\n",
    "# with the rows of test_split_v.\n",
    "test_data = TensorDataset(test_inputs, test_masks, test_labels)\n",
    "test_sampler = SequentialSampler(test_data)\n",
    "test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting labels for 2,702 test sentences...\n",
      "    Batch   100  of  2,702,      Elapsed: 0:00:16.\n",
      "    DONE.\n"
     ]
    }
   ],
   "source": [
    "# Prediction on test set\n",
    "\n",
    "print('Predicting labels for {:,} test sentences...'.format(len(test_inputs)))\n",
    "\n",
    "# Put model in evaluation mode (disables dropout etc.).\n",
    "model.eval()\n",
    "\n",
    "# Tracking variables \n",
    "predictions , true_labels = [], []\n",
    "\n",
    "# Measure elapsed time.\n",
    "t0 = time.time()\n",
    "\n",
    "# Predict \n",
    "for (step, batch) in enumerate(test_dataloader):\n",
    "    \n",
    "    # Add batch to GPU\n",
    "    batch = tuple(t.to(device) for t in batch)\n",
    "  \n",
    "    # Progress update every 100 batches.\n",
    "    if step % 100 == 0 and not step == 0: \n",
    "        # calculate elapsed time in minutes.\n",
    "        elapsed = format_time(time.time() - t0)\n",
    "        \n",
    "        # Report progress (bug fix: the total is the number of batches,\n",
    "        # not the number of labels, so 'Batch X of Y' is now consistent).\n",
    "        print('    Batch {:>5,}  of  {:>5,},      Elapsed: {:}.'.format(step, len(test_dataloader), elapsed))\n",
    "        \n",
    "        \n",
    "    # Unpack the inputs from our dataloader\n",
    "    b_input_ids, b_input_mask, b_labels = batch\n",
    "  \n",
    "    # Telling the model not to compute or store gradients, saving memory and \n",
    "    # speeding up prediction\n",
    "    with torch.no_grad():\n",
    "        # Forward pass, calculate logit predictions\n",
    "        outputs = model(b_input_ids, token_type_ids=None, \n",
    "                      attention_mask=b_input_mask)\n",
    "\n",
    "    logits = outputs[0]\n",
    "\n",
    "    # Move logits and labels to CPU\n",
    "    logits = logits.detach().cpu().numpy()\n",
    "    label_ids = b_labels.to('cpu').numpy()\n",
    "  \n",
    "    # Store predictions and true labels (one array per batch; concatenated later).\n",
    "    predictions.append(logits)\n",
    "    true_labels.append(label_ids)\n",
    "\n",
    "print('    DONE.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Combine the results across the batches.\n",
    "# predictions: list of (batch, 2) logit arrays -> single (n_chunks, 2) array.\n",
    "predictions = np.concatenate(predictions, axis=0)\n",
    "# true_labels: list of 1-D label arrays -> single 1-D array aligned with predictions.\n",
    "true_labels = np.concatenate(true_labels, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ 0.16747084, -0.09904484],\n",
       "       [ 0.1662383 , -0.09477215],\n",
       "       [ 0.16399503, -0.09905042],\n",
       "       [ 0.16810508, -0.09691988],\n",
       "       [ 0.16665009, -0.09503195],\n",
       "       [ 0.16630077, -0.09839007],\n",
       "       [ 0.16835368, -0.09515082],\n",
       "       [ 0.16914505, -0.09748755],\n",
       "       [ 0.1666932 , -0.09336632],\n",
       "       [ 0.16807283, -0.09462682],\n",
       "       [ 0.16497596, -0.09761261],\n",
       "       [ 0.16601484, -0.09732819],\n",
       "       [ 0.16813472, -0.09486486],\n",
       "       [ 0.1663592 , -0.09482013],\n",
       "       [ 0.16552177, -0.09467977],\n",
       "       [ 0.1653005 , -0.09803161],\n",
       "       [ 0.16651098, -0.09912128],\n",
       "       [ 0.16744445, -0.09720971],\n",
       "       [ 0.16649315, -0.09775088],\n",
       "       [ 0.1671198 , -0.09889032]], dtype=float32)"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Peek at a slice of the raw logits (columns: class 0 score, class 1 score).\n",
    "predictions[20:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1])"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Ground-truth labels for the same slice, for eyeballing against the logits.\n",
    "true_labels[20:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.5610658771280533\n"
     ]
    }
   ],
   "source": [
    "# Hard prediction from the logit margin: class 1 iff logit_1 > logit_0.\n",
    "p1 = predictions[:,1]-predictions[:,0]\n",
    "\n",
    "# Vectorized thresholding keeps pred_label the same length as true_labels.\n",
    "# (The original per-element if/elif loop could silently drop an entry when\n",
    "# neither comparison held, e.g. a NaN margin, desynchronizing the arrays.)\n",
    "pred_label = (p1 > 0).astype(int)\n",
    "\n",
    "# Chunk-level accuracy: fraction of chunks whose prediction matches the label.\n",
    "accuracy = np.sum(pred_label == true_labels) / len(true_labels)\n",
    "print(accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test ROC AUC: 0.460\n"
     ]
    }
   ],
   "source": [
    "# Our performance metric for the test set: ROC AUC over chunk-level scores.\n",
    "from sklearn.metrics import roc_auc_score\n",
    "\n",
    "# Use the logit margin (class-1 logit minus class-0 logit) as the ranking score.\n",
    "p1 = predictions[:,1]-predictions[:,0]\n",
    "\n",
    "# Calculate the ROC AUC for the binary 0/1 labels (1 is the positive class).\n",
    "auc = roc_auc_score(true_labels, p1)\n",
    "\n",
    "print('Test ROC AUC: %.3f' %auc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "total 427964K\r\n",
      "-rw-rw-r-- 1 maryam maryam      2K Oct  9 17:40 config.json\r\n",
      "-rw-rw-r-- 1 maryam maryam 427721K Oct  9 17:40 pytorch_model.bin\r\n",
      "-rw-rw-r-- 1 maryam maryam      1K Oct  9 17:40 special_tokens_map.json\r\n",
      "-rw-rw-r-- 1 maryam maryam      1K Oct  9 17:40 tokenizer_config.json\r\n",
      "-rw-rw-r-- 1 maryam maryam    227K Oct  9 17:40 vocab.txt\r\n"
     ]
    }
   ],
   "source": [
    "# Inspect the saved model artifacts and their sizes (block size = 1K).\n",
    "!ls -l --block-size=K ./model_save/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Load a trained model and vocabulary that you have fine-tuned\n",
    "# model = model_class.from_pretrained(output_dir)\n",
    "# tokenizer = tokenizer_class.from_pretrained(output_dir)\n",
    "\n",
    "# # Copy the model to the GPU.\n",
    "# model.to(device)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cips",
   "language": "python",
   "name": "cips"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7-final"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}