{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Sentiment Analysis with Region Embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\users\\thushan\\documents\\python_virtualenvs\\tensorflow_venv\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "# These are all the modules we'll be using later. Make sure you can import them\n",
    "# before proceeding further.\n",
    "%matplotlib inline\n",
    "from __future__ import print_function\n",
    "import collections\n",
    "import math\n",
    "import numpy as np\n",
    "import os\n",
    "import random\n",
    "import tensorflow as tf\n",
    "import tarfile\n",
    "from matplotlib import pylab\n",
    "from six.moves import range\n",
    "from six.moves.urllib.request import urlretrieve\n",
    "from sklearn.manifold import TSNE\n",
    "from sklearn.cluster import KMeans\n",
    "import nltk # standard preprocessing\n",
    "import operator # sorting items in dictionary by value\n",
    "from sklearn.utils import shuffle\n",
    "from math import ceil"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Download data\n",
    "\n",
     "Here we download the sentiment data from this [website](http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz). These are movie reviews submitted by users, classified according to whether they express a positive or a negative sentiment."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found and verified aclImdb_v1.tar.gz\n"
     ]
    }
   ],
   "source": [
    "url = 'http://ai.stanford.edu/~amaas/data/sentiment/'\n",
    "\n",
    "def maybe_download(filename, expected_bytes):\n",
    "  \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n",
    "  if not os.path.exists(filename):\n",
    "    filename, _ = urlretrieve(url + filename, filename)\n",
    "  statinfo = os.stat(filename)\n",
    "  if statinfo.st_size == expected_bytes:\n",
    "    print('Found and verified %s' % filename)\n",
    "  else:\n",
    "    print(statinfo.st_size)\n",
    "    raise Exception(\n",
    "      'Failed to verify ' + filename + '. Can you get to it with a browser?')\n",
    "  return filename\n",
    "\n",
    "filename = maybe_download('aclImdb_v1.tar.gz', 84125825)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Read data\n",
    "Here the data is read into the program."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracted (or already had) all data\n",
      "Reading positive data\n",
      "Reading negative data\n",
      "Data size 7054759\n",
      "Example words (start):  ['bromwell', 'high', 'is', 'a', 'cartoon', 'comedy', '.', 'it', 'ran', 'at']\n",
      "Example words (end):  ['do', \"n't\", 'waste', 'your', 'time', ',', 'this', 'is', 'painful', '.']\n"
     ]
    }
   ],
   "source": [
    "# Number of read files\n",
    "files_read = 0\n",
    "\n",
    "# Contains positive and negative sentiments\n",
    "pos_members = []\n",
    "neg_members = []\n",
    "\n",
    "# Number of files to read\n",
    "files_to_read = 400\n",
    "\n",
    "# Creates a temporary directory to extract data to\n",
    "if not os.path.exists('tmp_reviews'):\n",
    "    os.mkdir('tmp_reviews')\n",
    "    \n",
    "def read_data(filename):\n",
    "    \"\"\"Extract the first file enclosed in a tar.z file as a list of words\"\"\"\n",
    "\n",
    "    # Check if the directory is empty or not\n",
    "    if os.listdir('tmp_reviews') == []:\n",
    "        # If not empty read both postive and negative files upto\n",
    "        # files_to_read many files and extract them to tmp_review folder\n",
    "        with tarfile.open(\"aclImdb_v1.tar.gz\") as t:\n",
    "            for m in t.getmembers():\n",
    "                # Extract positive sentiments and update files_read\n",
    "                if 'aclImdb/train/pos' in m.name and '.txt' in m.name:\n",
    "                    pos_members.append(m)\n",
    "                    files_read += 1    \n",
    "                    if files_read >= files_to_read:\n",
    "                        break\n",
    "\n",
    "                files_read = 0 # reset files_read\n",
    "                # Extract negative sentiments and update files_read\n",
    "                if 'aclImdb/train/neg' in m.name and '.txt' in m.name:\n",
    "                    neg_members.append(m)\n",
    "                    files_read += 1    \n",
    "                    if files_read >= files_to_read:\n",
    "                        break\n",
    "            \n",
    "            t.extractall(path='tmp_reviews',members=pos_members+neg_members)    \n",
    "    \n",
    "    print('Extracted (or already had) all data')\n",
    "    \n",
    "    # These lists will contain all the postive and negative\n",
    "    # reviews we read above\n",
    "    data = []\n",
    "    data_sentiment, data_labels = [],[]\n",
    "    \n",
    "    print('Reading positive data')\n",
    "    \n",
    "    # Here we read all the postive data\n",
    "    for file in os.listdir(os.path.join('tmp_reviews',*('aclImdb','train','pos'))):\n",
    "        if file.endswith(\".txt\"):\n",
    "            with open(os.path.join('tmp_reviews',*('aclImdb','train','pos',file)),'r',encoding='utf-8') as f:\n",
    "                \n",
    "                # Convert all the words to lower and tokenize\n",
    "                file_string = f.read().lower()\n",
    "                file_string = nltk.word_tokenize(file_string)\n",
    "                \n",
    "                # Add the words to data list\n",
    "                data.extend(file_string)\n",
    "                \n",
    "                # If a review has more than 100 words truncate it to 100\n",
    "                data_sentiment.append(file_string[:100])\n",
    "                # If a review has less than 100 words add </s> tokens to make it 100\n",
    "                if len(data_sentiment[-1])<100:\n",
    "                    data_sentiment[-1].extend(['</s>' for _ in range(100-len(data_sentiment[-1]))])\n",
    "                data_labels.append(1)\n",
    "    \n",
    "    print('Reading negative data')\n",
    "    # Here we read all the negative data\n",
    "    for file in os.listdir(os.path.join('tmp_reviews',*('aclImdb','train','neg'))):\n",
    "        if file.endswith(\".txt\"):\n",
    "            with open(os.path.join('tmp_reviews',*('aclImdb','train','neg',file)),'r',encoding='utf-8') as f:\n",
    "                \n",
    "                # Convert all the words to lower and tokenize\n",
    "                file_string = f.read().lower()\n",
    "                file_string = nltk.word_tokenize(file_string)\n",
    "                # Add the words to data list\n",
    "                data.extend(file_string)\n",
    "                \n",
    "                # If a review has more than 100 words truncate it to 100\n",
    "                data_sentiment.append(file_string[:100])\n",
    "                # If a review has less than 100 words add </s> tokens to make it 100\n",
    "                if len(data_sentiment[-1])<100:\n",
    "                    data_sentiment[-1].extend(['</s>' for _ in range(100-len(data_sentiment[-1]))])\n",
    "                data_labels.append(0)\n",
    "    return data, data_sentiment, data_labels\n",
    "  \n",
    "words, sentiments_words, sentiment_labels = read_data(filename)\n",
    "\n",
    "# Print some statistics of the dta\n",
    "print('Data size %d' % len(words))\n",
    "print('Example words (start): ',words[:10])\n",
    "print('Example words (end): ',words[-10:])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Building the Dictionaries\n",
    "Builds the following. To understand each of these elements, let us also assume the text \"I like to go to school\"\n",
    "\n",
     "* `dictionary`: maps a string word to an ID (e.g. {I:0, like:1, to:2, go:3, school:4})\n",
     "* `reverse_dictionary`: maps an ID to a string word (e.g. {0:I, 1:like, 2:to, 3:go, 4:school})\n",
     "* `count`: List of (word, frequency) elements (e.g. [(I,1),(like,1),(to,2),(go,1),(school,1)])\n",
     "* `data` : Contains the string of text we read, where string words are replaced with word IDs (e.g. [0, 1, 2, 3, 2, 4])\n",
    "\n",
     "It also introduces an additional special token `UNK` to denote words that are too rare to make use of."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Most common words (+UNK) [['UNK', 2710699], ('the', 334680), (',', 275887), ('.', 235397), ('and', 163334), ('a', 162144), ('of', 145399), ('to', 135145), ('is', 110248), ('/', 102097), ('>', 102036), ('<', 101971), ('br', 101871), ('it', 94863), ('in', 93175), ('i', 86498), ('this', 75507), ('that', 72962), (\"'s\", 62159), ('was', 50367), ('as', 46818), ('for', 44050), ('with', 44001), ('movie', 42547), ('but', 42358)]\n",
      "Sample data [0, 2, 0, 0, 3, 4, 0, 0, 5, 6]\n",
      "Vocabulary size:  19908\n"
     ]
    }
   ],
   "source": [
    "# We set max vocabulary to this\n",
    "vocabulary_size = 20000\n",
    "\n",
    "def build_dataset(words):\n",
    "  global vocabulary_size\n",
    "  count = [['UNK', -1]]\n",
    "\n",
    "  # Sorts words by their frequency\n",
    "  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n",
    "    \n",
    "  # Define IDs for special tokens\n",
    "  dictionary = dict({'<unk>':0, '</s>':1})\n",
    "    \n",
    "  # Crude Vocabulary Control\n",
    "  # We ignore the most commone (words like a , the , ...)\n",
    "  # and most rare (having a repetition of less than 10)\n",
    "  # to reduce size of the vocabulary\n",
    "  count_dict = collections.Counter(words)\n",
    "  \n",
    "  for word in words:  \n",
    "    # Add the word to dictionary if already not encounterd\n",
    "    if word not in dictionary:\n",
    "        if count_dict[word]<50000 and count_dict[word] > 10:\n",
    "            dictionary[word] = len(dictionary)\n",
    "\n",
    "  data = list()\n",
    "  unk_count = 0\n",
    "    \n",
    "  # Replacing word strings with word IDs\n",
    "  for word in words:\n",
    "    if word in dictionary:\n",
    "      index = dictionary[word]\n",
    "    else:\n",
    "      index = 0  # dictionary['UNK']\n",
    "      unk_count = unk_count + 1\n",
    "    data.append(index)\n",
    "  count[0][1] = unk_count\n",
    "\n",
    "  # Create a reverse dictionary with the above created dictionary\n",
    "  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) \n",
    "\n",
    "  # Update the vocabulary\n",
    "  vocabulary_size = len(dictionary)\n",
    "  return data, count, dictionary, reverse_dictionary\n",
    "\n",
    "data, count, dictionary, reverse_dictionary = build_dataset(words)\n",
    "# Print some statistics about the data\n",
    "print('Most common words (+UNK)', count[:25])\n",
    "print('Sample data', data[:10])\n",
    "print('Vocabulary size: ',vocabulary_size)\n",
    "del words  # Hint to reduce memory."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Processing data for the Region Embedding Learning"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Processing Data for the Sentiment Analysis\n",
     "Here we define and run a function that converts the words in the positive/negative reviews above into word IDs."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Sample data\n",
      "\t [0, 2, 0, 0, 3, 4, 0, 0, 5, 6, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 16, 9, 17, 18, 19, 0, 20, 21, 22, 0, 0, 23, 24, 25, 26, 0, 27, 0, 0, 2, 0, 28, 0, 29, 30, 0, 31, 32, 0, 17, 18, 19, 0, 0, 0, 0, 33, 34, 0, 0, 35, 36, 37, 38, 39, 40, 41, 42, 43, 18, 44, 0, 0, 0, 0, 0, 0, 45, 46, 0, 47, 48, 26, 0, 0, 49, 0, 50, 0, 42, 36, 0, 51, 0, 52, 0, 53, 0, 54]\n",
      "\t [0, 84, 85, 0, 9, 86, 87, 88, 89, 90, 91, 92, 93, 94, 22, 95, 96, 0, 97, 0, 98, 99, 100, 0, 101, 0, 102, 103, 104, 105, 37, 106, 107, 108, 109, 0, 14, 0, 110, 0, 85, 111, 94, 0, 112, 0, 113, 114, 77, 0, 0, 115, 9, 116, 0, 117, 118, 119, 120, 13, 121, 16, 9, 122, 0, 0, 123, 100, 124, 0, 0, 125, 0, 126, 0, 127, 0, 0, 0, 0, 0, 0, 85, 120, 128, 129, 130, 131, 132, 0, 133, 134, 100, 0, 0, 0, 0, 0, 0, 0]\n",
      "\t [268, 269, 193, 0, 203, 204, 0, 270, 271, 0, 272, 0, 273, 274, 275, 0, 0, 229, 276, 0, 277, 278, 216, 279, 0, 280, 0, 0, 281, 100, 282, 0, 0, 63, 0, 9, 283, 9, 284, 0, 244, 245, 0, 0, 285, 100, 286, 0, 287, 288, 0, 194, 224, 289, 0, 224, 0, 0, 0, 290, 291, 183, 292, 0, 0, 224, 293, 0, 294, 0, 0, 153, 0, 0, 17, 0, 67, 0, 294, 19, 169, 295, 0, 296, 297, 298, 0, 299, 0, 0, 0, 300, 108, 0, 301, 302, 303, 248, 0, 0]\n",
      "\t [0, 0, 332, 0, 113, 333, 264, 334, 0, 155, 335, 0, 336, 0, 337, 338, 0, 0, 339, 221, 259, 0, 340, 341, 0, 0, 84, 342, 0, 343, 0, 344, 345, 346, 347, 0, 340, 341, 0, 348, 349, 0, 85, 350, 347, 0, 340, 341, 0, 351, 135, 352, 89, 0, 74, 0, 0, 353, 354, 355, 0, 95, 356, 0, 0, 264, 0, 357, 358, 0, 0, 359, 74, 360, 216, 221, 0, 0, 0, 361, 0, 190, 0, 0, 362, 13, 10, 0, 0, 113, 363, 0, 364, 0, 365, 0, 366, 367, 0, 337]\n",
      "\t [0, 0, 221, 0, 380, 154, 155, 264, 0, 0, 0, 29, 381, 243, 32, 113, 0, 183, 382, 0, 383, 142, 0, 233, 0, 0, 0, 0, 384, 203, 204, 385, 0, 326, 0, 386, 0, 16, 0, 304, 0, 387, 388, 0, 389, 102, 10, 390, 0, 391, 273, 91, 392, 291, 0, 393, 179, 0, 0, 10, 276, 0, 391, 394, 273, 91, 395, 0, 164, 0, 396, 0, 174, 379, 0, 95, 47, 0, 47, 0, 0, 0, 397, 0, 398, 0, 399, 0, 39, 0, 0, 0, 400, 0, 283, 401, 0, 155, 402, 106]\n",
      "\t [0, 0, 82, 0, 414, 415, 416, 0, 417, 0, 0, 0, 0, 415, 416, 0, 418, 419, 420, 0, 0, 0, 0, 421, 0, 0, 63, 422, 141, 0, 0, 423, 165, 415, 0, 424, 229, 0, 0, 419, 0, 95, 0, 0, 82, 0, 419, 0, 425, 426, 0, 0, 0, 179, 0, 0, 427, 41, 54, 416, 428, 0, 429, 0, 430, 431, 0, 183, 0, 0, 0, 0, 0, 0, 0, 0, 0, 287, 432, 433, 434, 0, 435, 436, 0, 0, 437, 422, 158, 438, 29, 151, 0, 439, 440, 0, 162, 441, 0, 310]\n",
      "\t [470, 337, 92, 471, 62, 0, 472, 164, 0, 473, 474, 475, 0, 0, 0, 0, 0, 0, 0, 0, 0, 362, 476, 0, 477, 478, 119, 135, 174, 82, 479, 480, 0, 481, 0, 0, 0, 0, 0, 0, 0, 0, 482, 483, 193, 415, 416, 0, 0, 484, 485, 0, 0, 264, 38, 486, 487, 0, 38, 394, 488, 344, 29, 135, 489, 0, 264, 0, 490, 0, 0, 491, 233, 0, 357, 492, 0, 0, 326, 339, 82, 493, 494, 135, 0, 356, 495, 135, 496, 0, 497, 135, 498, 0, 0, 499, 0, 283, 0, 500]\n",
      "\t [0, 0, 17, 513, 514, 515, 419, 516, 100, 517, 518, 0, 519, 84, 415, 416, 89, 0, 0, 520, 378, 0, 521, 522, 248, 523, 0, 524, 525, 193, 0, 526, 362, 0, 0, 246, 527, 0, 183, 0, 528, 84, 529, 530, 89, 0, 51, 531, 532, 533, 13, 0, 431, 0, 84, 362, 89, 0, 490, 0, 519, 534, 402, 535, 536, 0, 0, 537, 0, 538, 0, 539, 540, 19, 541, 0, 264, 0, 542, 543, 0, 0, 0, 0, 0, 0, 0, 0, 135, 544, 258, 545, 546, 547, 548, 0, 0, 465, 0, 264]\n",
      "\t [0, 467, 527, 84, 636, 89, 0, 415, 416, 0, 529, 530, 0, 577, 578, 0, 435, 436, 0, 598, 599, 0, 433, 434, 0, 637, 0, 0, 638, 0, 0, 639, 203, 640, 0, 84, 641, 65, 621, 0, 89, 0, 0, 0, 0, 0, 0, 0, 0, 642, 0, 643, 416, 0, 644, 645, 646, 0, 0, 0, 0, 0, 0, 0, 0, 80, 0, 0, 13, 647, 0, 648, 152, 80, 0, 0, 649, 650, 70, 651, 165, 0, 11, 0, 652, 0, 0, 653, 0, 654, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "\t [135, 255, 0, 415, 416, 0, 755, 756, 730, 0, 0, 757, 758, 402, 0, 0, 759, 760, 165, 47, 547, 761, 360, 169, 90, 762, 0, 763, 84, 165, 764, 0, 765, 0, 17, 766, 0, 0, 19, 0, 54, 767, 51, 0, 768, 291, 95, 0, 549, 0, 769, 63, 89, 0, 0, 770, 169, 90, 385, 760, 273, 91, 304, 0, 771, 17, 772, 19, 0, 17, 70, 773, 774, 19, 0, 17, 0, 467, 527, 19, 0, 745, 775, 776, 0, 0, 777, 778, 779, 0, 0, 780, 183, 270, 110, 0, 781, 0, 0, 0]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "def build_sentiment_dataset(sentiment_words, sentiment_labels):\n",
    "  '''\n",
    "  This function takes in reviews and labels, and then replace \n",
    "  all the words in the reviews with word IDs we assigned to each\n",
    "  word in our dictionary\n",
    "  '''\n",
    "  data = [[] for _ in range(len(sentiment_words))]\n",
    "  unk_count = 0\n",
    "  for sent_id,sent in enumerate(sentiment_words):\n",
    "    for word in sent:\n",
    "        if word in dictionary:\n",
    "          index = dictionary[word]\n",
    "        else:\n",
    "          index = 0  # dictionary['UNK']\n",
    "          unk_count = unk_count + 1\n",
    "        data[sent_id].append(index)\n",
    "  \n",
    "  return data, sentiment_labels\n",
    "\n",
    "# Run the operation\n",
    "sentiment_data, sentiment_labels = build_sentiment_dataset(sentiments_words, sentiment_labels)\n",
    "print('Sample data')\n",
    "for rev in sentiment_data[:10]:\n",
    "    print('\\t',rev)\n",
    "\n",
    "del sentiments_words  # Hint to reduce memory."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data Generators\n",
    "\n",
    "We define two data generators:\n",
    "* Data generator for generating data for classifiers\n",
    "* Data generator for generating data for region embedding algorithm\n",
    "\n",
    "\n",
    "### Data Generator for Training Classifiers\n",
    "\n",
    "Here we define a data generator function that generates data to train the classifier that identifies if a review is positive or negative"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "    batch: sum:  [4 9 9 5 6 6 5 7] [264 108  22 165  80  74  71   9]\n",
      "    labels:  [0 0 1 0 0 0 0 1]\n",
      "    batch: sum:  [7 6 5 5 6 6 8 4] [ 108   80 3955 1660   55    6   17  385]\n",
      "    labels:  [1 1 1 1 0 0 1 1]\n",
      "    batch: sum:  [6 7 5 8 5 8 7 8] [ 38 116 419   3 134  83  17  92]\n",
      "    labels:  [0 0 1 1 0 0 1 1]\n",
      "    batch: sum:  [6 7 9 4 5 8 7 8] [221  85  17  51  65   9 264  13]\n",
      "    labels:  [0 1 0 1 1 0 1 0]\n",
      "    batch: sum:  [4 6 7 8 7 8 6 7] [326  70  95 100 248   9  67  70]\n",
      "    labels:  [0 1 1 1 0 1 0 1]\n",
      "    batch: sum:  [10  7  9  6  9  4  8  6] [ 22  91  17   4  71 409  51  20]\n",
      "    labels:  [0 0 1 0 1 0 1 0]\n",
      "    batch: sum:  [6 6 6 7 9 6 7 4] [  8   9  11 100  82 165  44  10]\n",
      "    labels:  [0 1 0 0 0 0 0 0]\n",
      "    batch: sum:  [9 8 6 5 8 6 6 7] [ 17   6  39 248  37 221  94  44]\n",
      "    labels:  [0 0 0 1 0 0 1 0]\n",
      "    batch: sum:  [8 7 7 8 8 7 6 3] [ 17   6 413  74  92  20  77 103]\n",
      "    labels:  [0 1 1 1 1 1 1 1]\n",
      "    batch: sum:  [9 7 4 5 5 7 8 5] [152  37 568 131 100   6 114  70]\n",
      "    labels:  [1 0 0 1 0 0 0 0]\n",
      "\n",
      "Valid data\n",
      "    batch: sum:  [6 6 7 5 7 5 6 7] [ 70   6  92 165  65 131  19  20]\n",
      "    labels:  [1 1 1 0 0 1 0 1]\n",
      "    batch: sum:  [ 4  8  7  8 10  6  7  7] [824  92  39   8  17  17   8 100]\n",
      "    labels:  [0 0 1 1 1 1 0 0]\n",
      "    batch: sum:  [7 6 6 6 6 6 8 4] [  17   22   50 1901  229  326   37  131]\n",
      "    labels:  [0 0 1 1 1 1 1 0]\n",
      "    batch: sum:  [6 7 8 6 8 9 6 6] [94 13 95  4 13 94 51 17]\n",
      "    labels:  [1 1 0 1 1 0 0 1]\n",
      "    batch: sum:  [7 4 7 5 6 6 8 5] [ 90 362 152 116  17 131  17  39]\n",
      "    labels:  [1 1 1 0 0 0 0 1]\n",
      "    batch: sum:  [8 6 7 5 6 5 6 4] [ 17  20 216  94 326  90  20  95]\n",
      "    labels:  [1 1 1 0 1 1 0 1]\n",
      "    batch: sum:  [6 8 4 8 8 4 6 5] [106  17  52  15  54 264 179  52]\n",
      "    labels:  [0 1 1 1 0 1 1 0]\n",
      "    batch: sum:  [5 6 6 5 6 7 7 8] [ 20 990  90 273  20 131 110  37]\n",
      "    labels:  [0 1 1 0 1 1 1 1]\n",
      "    batch: sum:  [6 6 8 7 6 7 6 6] [ 83 326   9  20  20  91  20 128]\n",
      "    labels:  [0 0 1 1 0 0 1 1]\n",
      "    batch: sum:  [6 4 7 8 7 4 6 4] [  17 8717   65   17   27   13  762   92]\n",
      "    labels:  [1 0 1 0 0 1 1 1]\n"
     ]
    }
   ],
   "source": [
    "# Shuffle the data\n",
    "sentiment_data, sentiment_labels = shuffle(sentiment_data, sentiment_labels)\n",
    "\n",
    "sentiment_data_index = -1\n",
    "\n",
    "def generate_sentiment_batch(batch_size, region_size,is_train):\n",
    "  global sentiment_data_index\n",
    "\n",
    "  # Number of regions in a single review\n",
    "  # as a single review has 100 words after preprocessing\n",
    "  num_r = 100//region_size \n",
    "    \n",
    "  # Contains input data and output data\n",
    "  batches = [np.ndarray(shape=(batch_size, vocabulary_size), dtype=np.int32) for _ in range(num_r)]\n",
    "  labels = np.ndarray(shape=(batch_size), dtype=np.int32)\n",
    "\n",
    "  # Populate each batch index\n",
    "  for i in range(batch_size):\n",
    "    # Choose a data point index, we use the last 300 reviews (after shuffling)\n",
    "    # as test data and rest as training data\n",
    "    if is_train:\n",
    "      sentiment_data_index = np.random.randint(len(sentiment_data)-300)\n",
    "    else:\n",
    "      sentiment_data_index = max(len(sentiment_data)-300, (sentiment_data_index + 1)%len(sentiment_data))\n",
    "\n",
    "    # for each region\n",
    "    for reg_i in range(num_r):\n",
    "        batches[reg_i][i,:] = np.zeros(shape=(1, vocabulary_size), dtype=np.float32) #input\n",
    "        # for each word in region\n",
    "        for wi in sentiment_data[sentiment_data_index][reg_i*num_r:(reg_i+1)*num_r]:\n",
    "\n",
    "            # if the current word is informative (not <unk> or </s>)\n",
    "            # Update the bow representation for that region\n",
    "            if wi != dictionary['<unk>'] and wi != dictionary['</s>']:\n",
    "              batches[reg_i][i,wi] += 1 \n",
    "\n",
    "    labels[i] = sentiment_labels[sentiment_data_index]\n",
    "        \n",
    "  return batches, labels\n",
    "\n",
    "\n",
    "# Print some data batches to see what they look like\n",
    "for _ in range(10):\n",
    "    batches, labels = generate_sentiment_batch(batch_size=8, region_size=10, is_train=True)\n",
    "    \n",
    "    print('    batch: sum: ', np.sum(batches[0],axis=1), np.argmax(batches[0],axis=1))\n",
    "    print('    labels: ', labels)\n",
    "    \n",
    "print('\\nValid data')\n",
    "\n",
    "# Print some data batches to see what they look like\n",
    "for _ in range(10):\n",
    "    batches, labels = generate_sentiment_batch(batch_size=8, region_size=10, is_train=False)\n",
    "    \n",
    "    print('    batch: sum: ', np.sum(batches[0],axis=1), np.argmax(batches[0],axis=1))\n",
    "    print('    labels: ', labels)\n",
    "    \n",
    "sentiment_data_index = -1 # Reset the index"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "## Sentiment Analysis without Region Embeddings\n",
    "\n",
    "This is a standard sentiment classifier. It first starts with a convolution layer which sends the output to a fully connected classification layer."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "batch_size = 50\n",
     "\n",
     "tf.reset_default_graph()\n",
     "graph = tf.Graph()\n",
     "\n",
     "region_size = 10\n",
     "# Kernel width and stride both equal vocabulary_size: on the flattened\n",
     "# [batch_size, num_r*vocabulary_size, 1] input below, each conv step\n",
     "# therefore covers exactly one region's bag-of-words, giving num_r outputs\n",
     "conv_width = vocabulary_size\n",
     "conv_stride = vocabulary_size\n",
     "\n",
     "num_r = 100//region_size\n",
     "\n",
     "with graph.as_default():\n",
     "\n",
     "  # Input/output data: one [batch_size, vocabulary_size] placeholder per region\n",
     "  train_dataset = [tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]) for _ in range(num_r)]\n",
     "  train_labels = tf.placeholder(tf.float32, shape=[batch_size])\n",
     "\n",
     "  # Testing input/output data\n",
     "  valid_dataset = [tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]) for _ in range(num_r)]\n",
     "  valid_labels = tf.placeholder(tf.int32, shape=[batch_size])\n",
     "\n",
     "  with tf.variable_scope('sentiment_analysis'):\n",
     "      # First convolution layer weights/bias (shape [width, in_ch, out_ch])\n",
     "      sent_w1 = tf.get_variable('conv_w1', shape=[conv_width,1,1], initializer = tf.contrib.layers.xavier_initializer_conv2d())\n",
     "      sent_b1 = tf.get_variable('conv_b1',shape=[1], initializer = tf.random_normal_initializer(stddev=0.05))\n",
     "      \n",
     "      # Concat all the train data and create a tensor of [batch_size, num_r, vocabulary_size]\n",
     "      concat_train_dataset = tf.concat([tf.expand_dims(t,0) for t in train_dataset],axis=0)\n",
     "      concat_train_dataset = tf.transpose(concat_train_dataset, [1,0,2]) # make batch-major (axis)\n",
     "      \n",
     "      # Flatten the regions into one [batch_size, num_r*vocabulary_size] row per review\n",
     "      concat_train_dataset = tf.reshape(concat_train_dataset, [batch_size, -1])\n",
     "      \n",
     "      # Compute the convolution output on the above transformation of inputs\n",
     "      sent_h = tf.nn.relu(\n",
     "          tf.nn.conv1d(tf.expand_dims(concat_train_dataset,-1),filters=sent_w1,stride=conv_stride, padding='SAME') + sent_b1\n",
     "      )\n",
     "\n",
     "      # Do the same for validation data (reusing the same conv weights)\n",
     "      concat_valid_dataset = tf.concat([tf.expand_dims(t,0) for t in valid_dataset],axis=0)\n",
     "      concat_valid_dataset = tf.transpose(concat_valid_dataset, [1,0,2]) # make batch-major (axis)\n",
     "      concat_valid_dataset = tf.reshape(concat_valid_dataset, [batch_size, -1])\n",
     "    \n",
     "      # Compute the validation output\n",
     "      sent_h_valid = tf.nn.relu(\n",
     "          tf.nn.conv1d(tf.expand_dims(concat_valid_dataset,-1),filters=sent_w1,stride=conv_stride, padding='SAME') + sent_b1\n",
     "      )\n",
     "\n",
     "      # Flatten the conv outputs (one activation per region) for the linear layer\n",
     "      sent_h = tf.reshape(sent_h, [batch_size, -1])\n",
     "      sent_h_valid = tf.reshape(sent_h_valid, [batch_size, -1])\n",
     "        \n",
     "      # Linear Layer mapping the num_r region activations to a single logit\n",
     "      sent_w = tf.get_variable('linear_w', shape=[num_r, 1], initializer= tf.contrib.layers.xavier_initializer())\n",
     "      sent_b = tf.get_variable('linear_b', shape=[1], initializer= tf.random_normal_initializer(stddev=0.05))\n",
     "\n",
     "      # Compute the final output with the linear layer defined above\n",
     "      sent_out = tf.matmul(sent_h,sent_w)+sent_b\n",
     "      # NOTE(review): 'tr_train_predictions' looks like a typo for\n",
     "      # 'tf_train_predictions'; kept as-is since later cells may reference it\n",
     "      tr_train_predictions = tf.nn.sigmoid(tf.matmul(sent_h, sent_w) + sent_b)\n",
     "      tf_valid_predictions = tf.nn.sigmoid(tf.matmul(sent_h_valid, sent_w) + sent_b)\n",
     "    \n",
     "      # Calculate valid accuracy: threshold the sigmoid output at 0.5\n",
     "      valid_pred_classes = tf.cast(tf.reshape(tf.greater(tf_valid_predictions, 0.5),[-1]),tf.int32)\n",
     "    \n",
     "      # Loss computation and optimization (sigmoid cross entropy on the raw logits)\n",
     "      naive_sent_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.expand_dims(train_labels,-1), logits=sent_out))\n",
     "      naive_sent_optimizer = tf.train.AdamOptimizer(learning_rate = 0.0005).minimize(naive_sent_loss)\n",
     "\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      ".....Average loss at step 500: 0.692977\n",
      "[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 53.00000\n",
      ".\n",
      "Train Predictions:\n",
      "[0.5398299  0.52148247 0.57564086 0.6057088  0.5066216  0.4995854\n",
      " 0.52885103 0.5014624  0.5738266  0.51268613 0.5864872  0.5437006\n",
      " 0.5601032  0.49563897 0.5208909  0.54059374 0.5325987  0.56131095\n",
      " 0.5542273  0.54287297 0.6145708  0.56996554 0.53303754 0.5173336\n",
      " 0.54148394 0.59661555 0.501636   0.50293607 0.50782865 0.5669592\n",
      " 0.5849927  0.5362609  0.5282925  0.5504024  0.5769378  0.4991507\n",
      " 0.51285356 0.5656118  0.5401868  0.5354448  0.5168529  0.51380587\n",
      " 0.57529366 0.49920115 0.49638954 0.52626103 0.6057066  0.5653592\n",
      " 0.5580887  0.5387444 ]\n",
      "[1 0 1 1 1 0 0 0 0 0 1 1 1 0 0 1 1 1 1 0 1 1 0 0 1 1 0 0 0 1 1 0 0 1 0 0 0\n",
      " 1 0 1 0 0 1 0 0 0 1 1 1 0]\n",
      ".\n",
      "Train Predictions:\n",
      "[0.53347856 0.6433935  0.53555137 0.52461207 0.5592669  0.55867225\n",
      " 0.48304006 0.5134755  0.58634436 0.55636543 0.5513616  0.5296205\n",
      " 0.55686915 0.55245316 0.6191605  0.57981074 0.5266938  0.50180537\n",
      " 0.58336717 0.5536292  0.58773464 0.6358548  0.5962003  0.55425674\n",
      " 0.55611014 0.532857   0.54292274 0.59357536 0.54953194 0.6075142\n",
      " 0.53021115 0.59705454 0.52734005 0.51539934 0.61214507 0.5445286\n",
      " 0.53471303 0.5680835  0.5015975  0.5434597  0.51566607 0.5103009\n",
      " 0.52286553 0.576073   0.5658488  0.55518544 0.56711227 0.50235504\n",
      " 0.62453496 0.5030525 ]\n",
      "[0 1 1 0 1 1 0 0 1 1 1 0 1 1 1 1 1 0 1 1 1 1 1 1 0 0 1 1 1 1 0 1 0 1 1 1 0\n",
      " 1 0 1 0 1 0 1 0 1 0 0 1 0]\n",
      "..\n",
      "Train Predictions:\n",
      "[0.6281669  0.5693149  0.55835557 0.58671427 0.5954251  0.528519\n",
      " 0.544164   0.5538292  0.53524363 0.56180054 0.57146865 0.6600864\n",
      " 0.5950348  0.70271945 0.51135105 0.50162274 0.61257565 0.5346548\n",
      " 0.640032   0.47915387 0.57746637 0.55718285 0.6125209  0.5111741\n",
      " 0.55690384 0.48271057 0.4808032  0.5042626  0.55664563 0.49220365\n",
      " 0.5063578  0.616078   0.5226268  0.4923374  0.52179915 0.49335942\n",
      " 0.6447095  0.664009   0.56390184 0.6743909  0.5994003  0.5214779\n",
      " 0.6158631  0.49849156 0.68111086 0.6745049  0.5391151  0.63389707\n",
      " 0.4776263  0.62716454]\n",
      "[1 1 0 1 0 0 0 0 0 0 0 1 1 1 0 0 1 0 1 0 0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1\n",
      " 1 1 1 1 0 1 0 1 1 0 1 0 1]\n",
      ".\n",
      "Average loss at step 1000: 0.662195\n",
      "[1 1 1 1 0 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 0 1]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 0 0 1\n",
      " 1 1 1 1 1 1 1 0 1 1 1 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 65.00000\n",
      ".\n",
      "Train Predictions:\n",
      "[0.48791286 0.4814993  0.520895   0.5006049  0.49282727 0.5358579\n",
      " 0.5450098  0.67067295 0.55139095 0.49364725 0.7318095  0.47335815\n",
      " 0.52904165 0.7087658  0.46180406 0.54840136 0.5043462  0.65554774\n",
      " 0.7049975  0.6000343  0.47732767 0.54674435 0.65389025 0.5104879\n",
      " 0.58158284 0.4831877  0.5272517  0.57363915 0.6496185  0.5163783\n",
      " 0.51789886 0.6816802  0.5373244  0.49532205 0.5136856  0.5419302\n",
      " 0.5756462  0.50959605 0.61040175 0.6322758  0.6349979  0.51941353\n",
      " 0.5465167  0.6184086  0.51248395 0.7269664  0.7450346  0.6091433\n",
      " 0.47902462 0.55470395]\n",
      "[0 0 0 0 0 0 0 1 0 0 1 0 1 1 0 1 0 1 1 1 0 0 1 0 0 0 0 1 1 0 1 1 0 1 0 0 1\n",
      " 0 1 1 1 0 1 1 0 1 1 1 0 0]\n",
      "..\n",
      "Train Predictions:\n",
      "[0.5098728  0.5194385  0.6875274  0.61823547 0.7548512  0.44482762\n",
      " 0.44358504 0.7519445  0.6688327  0.50066286 0.59024185 0.5307202\n",
      " 0.47687507 0.53811574 0.63883483 0.6302862  0.49293384 0.5256194\n",
      " 0.7115141  0.658766   0.69207716 0.5598784  0.5788378  0.50958776\n",
      " 0.58766043 0.5487388  0.5533414  0.485756   0.5760571  0.602522\n",
      " 0.6970989  0.72705626 0.6940404  0.47365338 0.6532596  0.7173378\n",
      " 0.6458795  0.4766092  0.51574093 0.5136968  0.6480589  0.4484929\n",
      " 0.5416534  0.6618099  0.7148603  0.47062722 0.5329655  0.5864267\n",
      " 0.4752485  0.4735495 ]\n",
      "[0 0 1 1 1 0 0 1 1 0 1 1 0 0 1 0 0 0 1 1 1 0 1 0 1 1 1 0 1 1 1 1 1 0 1 0 1\n",
      " 0 0 0 1 0 0 1 1 0 1 1 0 0]\n",
      "..Average loss at step 1500: 0.604544\n",
      "[1 1 1 0 0 1 0 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 1 1 0 1 1 1 1 1 0 1\n",
      " 0 0 1 1 1 1 1 1 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 0 1 1 1 0 0 0\n",
      " 1 1 1 1 1 1 1 0 1 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 72.00000\n",
      ".\n",
      "Train Predictions:\n",
      "[0.4315178  0.45389757 0.64561284 0.47607657 0.46056148 0.5454631\n",
      " 0.423389   0.7420383  0.5089923  0.48817888 0.6571946  0.48824877\n",
      " 0.6685424  0.53417534 0.568409   0.8927229  0.5833828  0.43795457\n",
      " 0.69632477 0.45559096 0.46451446 0.52789664 0.7403449  0.6548991\n",
      " 0.42082536 0.55061114 0.4865282  0.46559292 0.53477305 0.5916949\n",
      " 0.6541726  0.69069886 0.49422452 0.45782846 0.49949333 0.43304244\n",
      " 0.6285547  0.62949055 0.49287277 0.6041156  0.6176573  0.70044106\n",
      " 0.5232215  0.7203313  0.51045394 0.4905259  0.6457035  0.50709075\n",
      " 0.42574662 0.62621313]\n",
      "[0 0 1 0 0 1 0 1 0 0 0 0 1 1 0 1 0 0 1 0 0 0 1 1 0 1 0 0 1 1 1 1 1 0 0 0 1\n",
      " 1 0 1 1 1 0 1 1 1 1 0 0 0]\n",
      "....\n",
      "Average loss at step 2000: 0.553805\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 1 1 1 0 1 1 1 1 0 1 1 1 0 1 1 0 1 1 1 1 1 0 1\n",
      " 0 0 1 1 0 1 0 1 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 1 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 77.00000\n",
      "..\n",
      "Train Predictions:\n",
      "[0.41134104 0.61209303 0.56754607 0.5942014  0.3931868  0.39387563\n",
      " 0.34629035 0.888453   0.35071236 0.37173945 0.60314333 0.7663671\n",
      " 0.7520451  0.6520177  0.3991184  0.69260484 0.76425534 0.45837578\n",
      " 0.59188765 0.6158909  0.46639478 0.85858035 0.5178756  0.7395688\n",
      " 0.49670693 0.5715366  0.4865879  0.53280157 0.36719364 0.40153322\n",
      " 0.77727467 0.79807955 0.55325514 0.4443964  0.8349032  0.47773615\n",
      " 0.7588399  0.8458654  0.36635584 0.58734226 0.8620448  0.3692674\n",
      " 0.4402245  0.8446156  0.40765733 0.83284456 0.3977775  0.46450433\n",
      " 0.40592468 0.72534925]\n",
      "[1 1 0 1 0 0 0 1 0 0 1 1 0 1 0 0 1 0 1 1 0 1 0 1 0 1 0 1 0 1 1 0 0 0 1 1 1\n",
      " 1 0 0 1 0 1 1 0 1 0 0 0 1]\n",
      "...Average loss at step 2500: 0.506576\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 1 0 1 1 0 1 1 1 1 1 0 1\n",
      " 0 0 1 1 0 1 0 1 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 1 1 1 1 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 79.00000\n",
      ".\n",
      "Train Predictions:\n",
      "[0.33002415 0.4768433  0.71964574 0.36330324 0.38838214 0.5400787\n",
      " 0.9264936  0.8705246  0.36026055 0.5957628  0.31326458 0.5235184\n",
      " 0.40750557 0.52223855 0.35754454 0.7959072  0.88170767 0.5885287\n",
      " 0.86543477 0.88090783 0.8721879  0.847802   0.6520115  0.70028365\n",
      " 0.44879344 0.39379698 0.7983082  0.36156422 0.42037308 0.7572798\n",
      " 0.95379436 0.36944956 0.50819314 0.64749444 0.6819545  0.708875\n",
      " 0.3632382  0.3539295  0.34058285 0.42926428 0.61869305 0.42673722\n",
      " 0.6231301  0.47047475 0.7055723  0.40796205 0.72998905 0.51705253\n",
      " 0.47094813 0.89795476]\n",
      "[0 1 1 0 0 0 1 1 0 1 0 0 0 1 0 1 1 1 1 1 1 1 1 1 0 0 1 0 0 1 1 0 0 1 1 1 0\n",
      " 0 0 0 1 0 1 0 1 0 1 1 0 1]\n",
      "....\n",
      "Average loss at step 3000: 0.470163\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 1 0 1 1 0 0 1 1 1 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 0 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 80.00000\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ".....Average loss at step 3500: 0.436069\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 0 1 1 0 1 1 0 0 1 1 1 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 0 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 81.00000\n",
      ".\n",
      "Train Predictions:\n",
      "[0.9191679  0.39319715 0.36287513 0.29251912 0.5680888  0.93126684\n",
      " 0.39693502 0.42238235 0.8174568  0.4630281  0.9039584  0.29418328\n",
      " 0.9528309  0.8534074  0.8707318  0.58283484 0.5931104  0.85520005\n",
      " 0.26581055 0.6447772  0.29716638 0.9423392  0.62046814 0.4785868\n",
      " 0.55562043 0.32718307 0.95256895 0.30318755 0.30430165 0.8483462\n",
      " 0.36887115 0.53116053 0.5989425  0.3178296  0.38332495 0.77783126\n",
      " 0.30041996 0.603104   0.28473738 0.71489453 0.3863738  0.9642126\n",
      " 0.79948604 0.38027152 0.7271056  0.82725906 0.7714398  0.3602082\n",
      " 0.42170653 0.85354847]\n",
      "[1 0 0 0 1 1 0 0 1 0 1 0 1 1 1 0 0 1 0 1 0 1 1 0 0 0 1 0 0 1 0 1 1 0 0 1 1\n",
      " 0 0 1 0 1 1 0 1 1 1 0 0 1]\n",
      "....\n",
      "Average loss at step 4000: 0.411540\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 0 1 1 0 1 1 0 0 1 1 1 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 1 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 80.00000\n",
      ".....Average loss at step 4500: 0.385476\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 0 1 1 0 1 1 0 0 1 1 1 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 0 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 82.00000\n",
      "....\n",
      "Train Predictions:\n",
      "[0.445999   0.71250945 0.313586   0.21888927 0.27181625 0.8507454\n",
      " 0.9426271  0.3441632  0.35008994 0.3063037  0.27739283 0.7026378\n",
      " 0.34773284 0.8787352  0.23478411 0.8135679  0.44427317 0.9314392\n",
      " 0.38716114 0.6639077  0.7057945  0.4718799  0.35794237 0.98790854\n",
      " 0.91169614 0.26889685 0.2932376  0.7721444  0.17603622 0.20813322\n",
      " 0.35758796 0.8773608  0.37334773 0.21646939 0.92994076 0.22375461\n",
      " 0.9723795  0.99345994 0.9446983  0.8342561  0.52776575 0.8936671\n",
      " 0.19260544 0.48494402 0.7851082  0.6420033  0.84236765 0.8454488\n",
      " 0.44078887 0.9598666 ]\n",
      "[0 0 0 0 0 1 1 0 0 0 0 1 0 1 0 1 0 1 1 1 1 0 0 1 1 0 0 1 0 0 0 1 1 0 1 0 1\n",
      " 1 1 1 0 1 0 0 1 1 1 1 0 1]\n",
      ".\n",
      "Average loss at step 5000: 0.361936\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 0 1 1 0 1 1 0 0 1 1 1 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 1 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 80.00000\n",
      ".....Average loss at step 5500: 0.342175\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 0 1 1 0 1 1 0 0 1 1 1 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 0 1 1 1 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 1 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 79.00000\n",
      ".....\n",
      "Average loss at step 6000: 0.320207\n",
      "[1 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 0 1 1 0 1 1 0 0 1 1 0 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 0 1 1 0 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 1 1 1 0 1 0 0 0 1 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 79.00000\n",
      "\n",
      "Train Predictions:\n",
      "[0.2474751  0.16171446 0.14716402 0.30703023 0.16023324 0.15169615\n",
      " 0.6506419  0.91551065 0.6012802  0.95176613 0.3422212  0.29913694\n",
      " 0.4426065  0.9572496  0.69383496 0.8335252  0.13502201 0.9008388\n",
      " 0.19395785 0.2358436  0.53258    0.8835747  0.14410333 0.5513723\n",
      " 0.64082915 0.712572   0.8347874  0.9906556  0.71989053 0.790358\n",
      " 0.24802068 0.33994392 0.20042835 0.4125262  0.98504686 0.23408923\n",
      " 0.5074064  0.903543   0.8827051  0.196563   0.91896045 0.89443755\n",
      " 0.1672556  0.6008504  0.9996226  0.99149865 0.24758811 0.12525511\n",
      " 0.12656966 0.85832536]\n",
      "[0 0 0 0 0 0 1 1 1 1 1 0 0 1 1 1 0 1 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 0 1 0 1\n",
      " 1 1 0 1 1 0 1 1 1 0 0 0 1]\n",
      "...\n",
      "Train Predictions:\n",
      "[0.8114061  0.22110778 0.34172592 0.9971512  0.56375897 0.8340649\n",
      " 0.96590835 0.17079368 0.29629225 0.11837009 0.42761853 0.95634806\n",
      " 0.90265834 0.8524951  0.300094   0.79758614 0.93871796 0.89040077\n",
      " 0.9368285  0.7110306  0.87481415 0.18790726 0.19441049 0.31186992\n",
      " 0.300094   0.9635268  0.94075364 0.9698821  0.8957142  0.9062844\n",
      " 0.62651515 0.293566   0.19036314 0.6378857  0.17984593 0.24614628\n",
      " 0.768548   0.24131991 0.8377169  0.88089556 0.9224784  0.8992288\n",
      " 0.15226977 0.9252743  0.4967404  0.96023774 0.94137704 0.18250872\n",
      " 0.3430246  0.14960437]\n",
      "[1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 1\n",
      " 0 1 1 1 1 0 1 0 1 1 0 0 0]\n",
      "..Average loss at step 6500: 0.306038\n",
      "[0 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 1 1 1 0 0 1 1 0 1 1 0 0 1 1 1 0 0 1\n",
      " 0 0 1 1 0 1 0 0 1 1 1 0 0]\n",
      "[1 1 1 0 0 1 0 1 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0\n",
      " 0 0 1 1 1 1 0 1 1 0 1 0 1]\n",
      "\n",
      "[1 1 0 1 1 1 1 1 1 0 1 1 1 1 0 0 1 1 0 1 0 1 1 0 0 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 1 1 1 1 0 1 0 0 0 0 0 1 0]\n",
      "[1 1 0 1 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 0 1 1 1 0 1 0 0 1 1 1 1 0 1 1 0 0 0\n",
      " 0 0 1 1 0 0 0 0 0 0 0 0 0]\n",
      "\n",
      "Valid accuracy: 80.00000\n",
      "...."
     ]
    }
   ],
   "source": [
    "num_steps = 10001\n",
    "\n",
    "# Records validation accuracy at each evaluation point (for later comparison/plotting)\n",
    "naive_valid_ot = []\n",
    "with tf.Session(graph=graph,config=tf.ConfigProto(allow_soft_placement=True)) as session:\n",
    "  tf.global_variables_initializer().run()\n",
    "  print('Initialized')\n",
    "  average_loss = 0\n",
    "  for step in range(num_steps):\n",
    "    # Progress markers: a dot every 100 steps, a newline every 1000 steps\n",
    "    if (step+1)%100==0:\n",
    "        print('.',end='')\n",
    "    if (step+1)%1000==0:\n",
    "        print('')\n",
    "        \n",
    "    # batches_data is a list of BOW arrays, one per region of the input text\n",
    "    batches_data, batch_labels = generate_sentiment_batch(batch_size, region_size,is_train=True)\n",
    "    \n",
    "    # Feed each region's BOW array into its corresponding placeholder\n",
    "    feed_dict = {}\n",
    "    #print(len(batches_data))\n",
    "    for ri, batch in enumerate(batches_data):    \n",
    "        feed_dict[train_dataset[ri]] = batch\n",
    "        \n",
    "    feed_dict.update({train_labels : batch_labels})\n",
    "    \n",
    "    # One optimization step; also fetch the loss and the training predictions\n",
    "    _, l, tr_batch_preds = session.run([naive_sent_optimizer, naive_sent_loss, tr_train_predictions], feed_dict=feed_dict)\n",
    "    \n",
    "    # Occasionally (~0.2% of steps) print predictions alongside the true labels\n",
    "    if np.random.random()<0.002:\n",
    "        print('\\nTrain Predictions:')\n",
    "        print(tr_batch_preds.reshape(-1))\n",
    "        print(batch_labels.reshape(-1))\n",
    "    average_loss += l\n",
    "            \n",
    "    if (step+1) % 500 == 0:\n",
    "      # presumably rewinds the pointer used by generate_sentiment_batch — TODO confirm\n",
    "      sentiment_data_index = -1\n",
    "      if step > 0:\n",
    "        average_loss = average_loss / 500\n",
    "      # The average loss is an estimate of the loss over the last 500 batches.\n",
    "      print('Average loss at step %d: %f' % (step+1, average_loss))\n",
    "      average_loss = 0\n",
    "      \n",
    "      # Evaluate accuracy on two validation batches\n",
    "      valid_accuracy = []\n",
    "      for vi in range(2):\n",
    "        batches_data, batch_labels = generate_sentiment_batch(batch_size, region_size,is_train=False)\n",
    "        \n",
    "        \n",
    "        feed_dict = {}\n",
    "        #print(len(batches_data))\n",
    "        for ri, batch in enumerate(batches_data):\n",
    "\n",
    "            feed_dict[valid_dataset[ri]] = batch\n",
    "        feed_dict.update({valid_labels : batch_labels})\n",
    "\n",
    "        batch_pred_classes, batch_preds = session.run([valid_pred_classes,tf_valid_predictions], feed_dict=feed_dict)\n",
    "        # Accuracy (%) for this validation batch\n",
    "        valid_accuracy.append(np.mean(batch_pred_classes==batch_labels)*100.0)\n",
    "        print(batch_pred_classes.reshape(-1))\n",
    "        print(batch_labels)\n",
    "        print()\n",
    "      print('Valid accuracy: %.5f'%np.mean(valid_accuracy))\n",
    "      naive_valid_ot.append(np.mean(valid_accuracy))\n",
    "      \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generating Data Batches for Training Region Embedding Learner\n",
    "\n",
    "We define a function that takes in a `batch_size` and `region_size` to output a batch of data, using the `data` list of all the words, which we created above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "data_index = 0\n",
    "\n",
    "def generate_region_batch(batch_size, region_size):\n",
    "  '''\n",
    "  Generates a batch of data to train the region embedding learner.\n",
    "\n",
    "  Each input row is the bag-of-words (BOW) vector of a contiguous region of\n",
    "  region_size words; the matching label row is the BOW vector of the\n",
    "  region_size context words around that region (region_size//2 drawn from the\n",
    "  left, borrowing from the right when the left side is too short).\n",
    "\n",
    "  Returns (batch, labels), both of shape (batch_size, vocabulary_size).\n",
    "  '''\n",
    "\n",
    "  global data_index\n",
    "  \n",
    "  # Holds the data inputs of the batch (BOW)\n",
    "  # NOTE(review): arrays are int32 but zero-filled below with a float32 template;\n",
    "  # this works but the dtypes are inconsistent\n",
    "  batch = np.ndarray(shape=(batch_size, vocabulary_size), dtype=np.int32)\n",
    "  # Holds the data outputs of the batch (BOW)\n",
    "  labels = np.ndarray(shape=(batch_size, vocabulary_size), dtype=np.int32)\n",
    "\n",
    "  # Total number of words needed: one region per batch row plus context on both sides\n",
    "  span = 2 * region_size + batch_size\n",
    "\n",
    "  # Sample a random starting index from data (each call reads a fresh random window)\n",
    "  data_index = np.random.randint(len(data)- span)\n",
    "\n",
    "  # Define a buffer that contains all the data within the current span\n",
    "  buffer = collections.deque(maxlen=span)\n",
    "  \n",
    "  # Fill the buffer with the words of the current span\n",
    "  for _ in range(span):\n",
    "    buffer.append(data[data_index])\n",
    "    data_index = (data_index + 1) % len(data)\n",
    "\n",
    "  current_input_start_idx = 0\n",
    "  # Populate each batch index\n",
    "  for i in range(batch_size):\n",
    "\n",
    "    batch[i,:] = np.zeros(shape=(1,vocabulary_size), dtype=np.float32) #input\n",
    "    \n",
    "    # Accumulating BOW vectors for input\n",
    "    for j in range(region_size):\n",
    "        # If the word is <unk> we ignore that word from BOW representation\n",
    "        # as that adds no value\n",
    "        if buffer[current_input_start_idx + j] != dictionary['<unk>']:\n",
    "            batch[i,buffer[current_input_start_idx + j]] += 1 \n",
    "\n",
    "    # We collect context words from both left and right\n",
    "    # The following logic takes care of that\n",
    "    if current_input_start_idx > 0:\n",
    "        ids_to_left_of_input = list(range(max(current_input_start_idx - (region_size//2),0), current_input_start_idx))\n",
    "    else:\n",
    "        ids_to_left_of_input = []\n",
    "    \n",
    "    # > 0 if there are not enough words on the left side of current input region\n",
    "    amount_flow_from_left_side = (region_size//2)-len(ids_to_left_of_input)\n",
    "    ids_to_right_of_input = list(range(current_input_start_idx+region_size, current_input_start_idx+region_size+(region_size//2)+amount_flow_from_left_side))\n",
    "    assert len(ids_to_left_of_input + ids_to_right_of_input) == region_size\n",
    "    \n",
    "    \n",
    "    labels[i,:] = np.zeros(shape=(1,vocabulary_size), dtype=np.float32) #output\n",
    "    \n",
    "    # Accumulates BOW vector for output\n",
    "    for k in ids_to_left_of_input + ids_to_right_of_input:\n",
    "        # If the word is <unk> we ignore that word from BOW representation\n",
    "        # as that adds no value\n",
    "        if buffer[k] != dictionary['<unk>']:\n",
    "            labels[i,buffer[k]] += 1\n",
    "        \n",
    "    current_input_start_idx += 1\n",
    "\n",
    "  # Advance the buffer and data_index by one more word\n",
    "  buffer.append(data[data_index])\n",
    "  data_index = (data_index + 1) % len(data)\n",
    "  \n",
    "  return batch, labels\n",
    "\n",
    "print('data:', [reverse_dictionary[di] for di in data[:50]])\n",
    "\n",
    "data_index = 0\n",
    "\n",
    "# Print a few batches (sums and argmax give a compact sanity check of the BOW vectors)\n",
    "for _ in range(10):\n",
    "    batch, labels = generate_region_batch(batch_size=8, region_size=4)\n",
    "    \n",
    "    print('    batch: sum: ', np.sum(batch,axis=1), np.argmax(batch,axis=1))\n",
    "    print('    labels: sum: ', np.sum(labels,axis=1), np.argmax(labels,axis=1))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Defining Region Embeddings Algorithm\n",
    "\n",
    "Here we define the algorithm for learning region embeddings. This is quite straightforward: we take the BOW representation of a region as input and ask the algorithm to predict the BOW representation of the surrounding context region."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "tf.reset_default_graph()\n",
    "\n",
    "# Input/output data: BOW vector of an input region and BOW vector of its context\n",
    "train_dataset = tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size])\n",
    "train_labels = tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size])\n",
    "\n",
    "# Used to mask uninformative tokens (down-weights the many zero entries in the labels)\n",
    "train_mask = tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size])\n",
    "\n",
    "# Embedding learning layer\n",
    "with tf.variable_scope('region_embeddings'):\n",
    "  \n",
    "  # This is the first hidden layer and is of size [vocabulary_size, 500];\n",
    "  # its weights (w1, b1) are saved after training and reused by the classifier\n",
    "  w1 = tf.get_variable('w1', shape=[vocabulary_size,500], initializer = tf.contrib.layers.xavier_initializer_conv2d())\n",
    "  b1 = tf.get_variable('b1',shape=[500], initializer = tf.random_normal_initializer(stddev=0.05))\n",
    "\n",
    "  # Compute the hidden output (the 500-dimensional region embedding)\n",
    "  h = tf.nn.relu(\n",
    "      tf.matmul(train_dataset,w1) + b1\n",
    "  )\n",
    "\n",
    "  # Linear Layer that outputs the predicted BOW representation\n",
    "  w = tf.get_variable('linear_w', shape=[500, vocabulary_size], initializer= tf.contrib.layers.xavier_initializer())\n",
    "  b = tf.get_variable('linear_b', shape=[vocabulary_size], initializer= tf.random_normal_initializer(stddev=0.05))\n",
    "\n",
    "  # Output (predicted context BOW, as logits/raw values)\n",
    "  out =tf.matmul(h,w)+b\n",
    "\n",
    "  # Loss is the mean squared error, element-wise weighted by train_mask\n",
    "  loss = tf.reduce_mean(tf.reduce_sum(train_mask*(out - train_labels)**2,axis=1))\n",
    "\n",
    "  # Minimizes the loss\n",
    "  optimizer = tf.train.AdamOptimizer(learning_rate = 0.0005).minimize(loss)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Running Region Embedding Learning Algorithm\n",
    "\n",
    "Here, using the above defined operations, we run the region embedding learning algorithm for a predefined number of steps."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "num_steps = 6001\n",
    "region_size = 10\n",
    "\n",
    "test_results = []\n",
    "session = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True))\n",
    "\n",
    "# Initialize TensorFlow variables\n",
    "tf.global_variables_initializer().run()\n",
    "print('Initialized')\n",
    "\n",
    "average_loss = 0\n",
    "\n",
    "# Run the algorithm for several steps\n",
    "for step in range(num_steps):\n",
    "    \n",
    "    # Progress markers: a dot every 100 steps, a newline every 1000 steps\n",
    "    if (step+1)%100==0:\n",
    "        print('.',end='')\n",
    "    if (step+1)%1000==0:\n",
    "        print('')\n",
    "        \n",
    "    # Generate a batch of data\n",
    "    batch_data, batch_labels = generate_region_batch(batch_size, region_size)\n",
    "    \n",
    "    # We perform this to reduce the effect of 0s in the batch labels during loss computations\n",
    "    # if we compute the loss naively with equal weight, the algorithm will perform poorly as \n",
    "    # there are more than 100 times zeros than ones\n",
    "    # So we normalize the loss by giving large weight to 1s and smaller weight to 0s\n",
    "    mask = ((vocabulary_size-region_size)*1.0/vocabulary_size) *np.array(batch_labels) + \\\n",
    "    (region_size*1.0/vocabulary_size)*np.ones(shape=(batch_size, vocabulary_size),dtype=np.float32)\n",
    "    mask = np.clip(mask,0,1.0)\n",
    "\n",
    "    feed_dict = {train_dataset : batch_data, \n",
    "                 train_labels : batch_labels,\n",
    "                 train_mask : mask}\n",
    "\n",
    "    # Run an optimization step\n",
    "    _, l = session.run([optimizer, loss], feed_dict=feed_dict)\n",
    "\n",
    "    average_loss += l\n",
    "\n",
    "    if (step+1) % 1000 == 0:\n",
    "      if step > 0:\n",
    "        average_loss = average_loss / 1000\n",
    "      # The average loss is an estimate of the loss over the last 1000 batches.\n",
    "      print('Average loss at step %d: %f' % (step+1, average_loss))\n",
    "      average_loss = 0\n",
    "\n",
    "# Save the weights, as these will be later used to \n",
    "# initialize a lower layer of the classifier.\n",
    "w1_arr = session.run(w1)\n",
    "b1_arr = session.run(b1)    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sentiment Analysis with Region Embeddings\n",
    "\n",
    "Here we define a sentiment classifier that uses the region embeddings to output better classification results. There are three important components:\n",
    "\n",
    "* Convolution network performing convolutions on standard BOW representation (`sentiment_analysis`)\n",
    "* Convolution network performing convolutions on the region embeddings (`region_embeddings`)\n",
    "* Final layer that combines the outputs of the above two networks to produce the final classification (`linear_layer`)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "tf.reset_default_graph()\n",
    "\n",
    "# Hyperparameters\n",
    "batch_size = 50\n",
    "region_size = 10\n",
    "\n",
    "# Convolution filter widths and strides. Each filter spans exactly one region's\n",
    "# representation (BOW width = vocabulary_size, region-embedding width = 500),\n",
    "# so with stride equal to the width each 1-D convolution emits one value per region.\n",
    "conv_width = vocabulary_size\n",
    "reg_conv_width = 500\n",
    "conv_stride = vocabulary_size\n",
    "reg_conv_stride = 500\n",
    "\n",
    "# Number of regions per input example\n",
    "num_r = 100//region_size\n",
    "\n",
    "# Input/output data: one BOW placeholder per region, plus the sentiment labels\n",
    "train_dataset = [tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size], name='train_data_%d'%ri) for ri in range(num_r)]\n",
    "train_labels = tf.placeholder(tf.float32, shape=[batch_size], name='train_labels')\n",
    "\n",
    "# Testing input/output data\n",
    "valid_dataset = [tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size], name='valid_data_%d'%ri) for ri in range(num_r)]\n",
    "valid_labels = tf.placeholder(tf.int32, shape=[batch_size], name='valid_labels')\n",
    "\n",
    "variables_to_init = []\n",
    "with tf.variable_scope('region_embeddings', reuse=False):\n",
    "\n",
    "  # Getting the region embeddings weights (pretrained above; frozen via trainable=False)\n",
    "  w1 = tf.get_variable('w1', shape=[vocabulary_size,500], trainable=False, initializer=tf.constant_initializer(w1_arr))\n",
    "  b1 = tf.get_variable('b1', shape=[500], trainable=False, initializer=tf.constant_initializer(b1_arr))\n",
    "                              \n",
    "  # Calculating region embeddings for all regions\n",
    "  concat_reg_emb = []\n",
    "  for t in train_dataset:\n",
    "      reg_emb = tf.nn.relu(\n",
    "          tf.matmul(t,w1) + b1\n",
    "      )\n",
    "      concat_reg_emb.append(tf.expand_dims(reg_emb,0))\n",
    "  \n",
    "  # Reshaping the region embeddings to [batch_size, regions, 500] (batch-major)\n",
    "  # and then flattening to [batch_size, regions*500]\n",
    "  concat_reg_emb = tf.concat(concat_reg_emb,axis=0)\n",
    "  concat_reg_emb = tf.transpose(concat_reg_emb, [1,0,2])\n",
    "  concat_reg_emb = tf.reshape(concat_reg_emb, [batch_size,-1])\n",
    "  \n",
    "  # Region embeddings for valid dataset\n",
    "  concat_valid_reg_emb = []\n",
    "  for v in valid_dataset:\n",
    "      valid_reg_emb = tf.nn.relu(\n",
    "          tf.matmul(v,w1) + b1\n",
    "      )\n",
    "      concat_valid_reg_emb.append(tf.expand_dims(valid_reg_emb,0))\n",
    "  \n",
    "  # Reshaping the valid region embeddings to [batch_size, regions, 500] and then\n",
    "  # flattening to [batch_size, regions*500]\n",
    "  concat_valid_reg_emb = tf.concat(concat_valid_reg_emb,axis=0)\n",
    "  concat_valid_reg_emb = tf.transpose(concat_valid_reg_emb, [1,0,2]) # batch major region embeddings\n",
    "  concat_valid_reg_emb = tf.reshape(concat_valid_reg_emb, [batch_size,-1])\n",
    "    \n",
    "  # Defining convolutions on regions (Weights and biases)\n",
    "  sentreg_w1 = tf.get_variable('reg_conv_w1', shape=[reg_conv_width,1,1], initializer = tf.contrib.layers.xavier_initializer_conv2d())\n",
    "  sentreg_b1 = tf.get_variable('reg_conv_b1',shape=[1], initializer = tf.random_normal_initializer(stddev=0.05))\n",
    "  variables_to_init.append(sentreg_w1)\n",
    "  variables_to_init.append(sentreg_b1)\n",
    "                              \n",
    "  # Doing convolutions on region embeddings (stride = one region, so one output per region)\n",
    "  sentreg_h = tf.nn.relu(\n",
    "      tf.nn.conv1d(tf.expand_dims(concat_reg_emb,-1),filters=sentreg_w1,stride=reg_conv_stride, padding='SAME') + sentreg_b1\n",
    "  )\n",
    "  sentreg_h_valid = tf.nn.relu(\n",
    "      tf.nn.conv1d(tf.expand_dims(concat_valid_reg_emb,-1),filters=sentreg_w1,stride=reg_conv_stride, padding='SAME') + sentreg_b1\n",
    "  )\n",
    "  \n",
    "  # reshape the outputs of the embeddings for the top linear layer\n",
    "  sentreg_h = tf.reshape(sentreg_h, [batch_size, -1])\n",
    "  sentreg_h_valid = tf.reshape(sentreg_h_valid, [batch_size, -1])\n",
    "\n",
    "with tf.variable_scope('sentiment_analysis',reuse=False):\n",
    "  # Convolution with just BOW inputs\n",
    "  sent_w1 = tf.get_variable('conv_w1', shape=[conv_width,1,1], initializer = tf.contrib.layers.xavier_initializer_conv2d())\n",
    "  sent_b1 = tf.get_variable('conv_b1',shape=[1], initializer = tf.random_normal_initializer(stddev=0.05))\n",
    "  variables_to_init.append(sent_w1)\n",
    "  variables_to_init.append(sent_b1)\n",
    "                              \n",
    "  # Concatenate the per-region BOW inputs into one flat vector per example\n",
    "  concat_train_dataset = tf.concat([tf.expand_dims(t,0) for t in train_dataset],axis=0)\n",
    "  concat_train_dataset = tf.transpose(concat_train_dataset, [1,0,2]) # make batch-major (axis)\n",
    "  concat_train_dataset = tf.reshape(concat_train_dataset, [batch_size, -1])\n",
    "  sent_h = tf.nn.relu(\n",
    "      tf.nn.conv1d(tf.expand_dims(concat_train_dataset,-1),filters=sent_w1,stride=conv_stride, padding='SAME') + sent_b1\n",
    "  )\n",
    "\n",
    "  # Valid data convolution\n",
    "  concat_valid_dataset = tf.concat([tf.expand_dims(v,0) for v in valid_dataset],axis=0)\n",
    "  concat_valid_dataset = tf.transpose(concat_valid_dataset, [1,0,2]) # make batch-major (axis)\n",
    "  concat_valid_dataset = tf.reshape(concat_valid_dataset, [batch_size, -1])\n",
    "\n",
    "  sent_h_valid = tf.nn.relu(\n",
    "      tf.nn.conv1d(tf.expand_dims(concat_valid_dataset,-1),filters=sent_w1,stride=conv_stride, padding='SAME') + sent_b1\n",
    "  )\n",
    "\n",
    "  # reshape the outputs of the embeddings for the top linear layer\n",
    "  sent_h = tf.reshape(sent_h, [batch_size, -1])\n",
    "  sent_h_valid = tf.reshape(sent_h_valid, [batch_size, -1])\n",
    "\n",
    "with tf.variable_scope('top_layer', reuse=False):\n",
    "  # Linear Layer (output): takes num_r conv outputs from each of the two branches\n",
    "  sent_w = tf.get_variable('linear_w', shape=[num_r*2, 1], initializer= tf.contrib.layers.xavier_initializer())\n",
    "  sent_b = tf.get_variable('linear_b', shape=[1], initializer= tf.random_normal_initializer(stddev=0.05))\n",
    "  variables_to_init.append(sent_w)\n",
    "  variables_to_init.append(sent_b)\n",
    "\n",
    "  # Here we feed in a combination of the BOW representation and region embedding\n",
    "  # related hidden outputs to the final classification layer\n",
    "  sent_hybrid_h = tf.concat([sentreg_h, sent_h],axis=1)\n",
    "  sent_hybrid_h_valid = tf.concat([sentreg_h_valid, sent_h_valid],axis=1)\n",
    "\n",
    "  # Output values (sigmoid probability of positive sentiment)\n",
    "  sent_out = tf.matmul(sent_hybrid_h,sent_w)+sent_b\n",
    "  tr_train_predictions = tf.nn.sigmoid(sent_out)\n",
    "  tf_valid_predictions = tf.nn.sigmoid(tf.matmul(sent_hybrid_h_valid, sent_w) + sent_b)\n",
    "\n",
    "  # Calculate valid accuracy: threshold the sigmoid output at 0.5\n",
    "  valid_pred_classes = tf.cast(tf.reshape(tf.greater(tf_valid_predictions, 0.5),[-1]),tf.int32)\n",
    "\n",
    "# Loss computation and optimization (sigmoid cross entropy on the raw logits)\n",
    "with tf.variable_scope('sentiment_with_region_embeddings'):\n",
    "  sent_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.expand_dims(train_labels,-1), logits=sent_out))\n",
    "  sent_optimizer = tf.train.AdamOptimizer(learning_rate = 0.0005).minimize(sent_loss)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_steps = 10001\n",
    "\n",
    "reg_valid_ot = []\n",
    "with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:\n",
    "    \n",
    "  tf.global_variables_initializer().run()\n",
    "  print('Initialized')\n",
    "  average_loss = 0\n",
    "  for step in range(num_steps):\n",
    "    print('.',end='')\n",
    "    if (step+1)%100==0:\n",
    "        print('')\n",
    "    \n",
    "    batches_data, batch_labels = generate_sentiment_batch(batch_size, region_size,is_train=True)\n",
    "    \n",
    "    feed_dict = {}\n",
    "    #print(len(batches_data))\n",
    "    for ri, batch in enumerate(batches_data):\n",
    "        \n",
    "        feed_dict[train_dataset[ri]] = batch\n",
    "    feed_dict.update({train_labels : batch_labels})\n",
    "    \n",
    "    _, l, tr_batch_preds = session.run([sent_optimizer, sent_loss, tr_train_predictions], feed_dict=feed_dict)\n",
    "    \n",
    "    if np.random.random()<0.002:\n",
    "        print('\\nTrain Predictions:')\n",
    "        print((tr_batch_preds>0.5).astype(np.int32).reshape(-1))\n",
    "        print(batch_labels.reshape(-1))\n",
    "    average_loss += l\n",
    "            \n",
    "    if (step+1) % 500 == 0:\n",
    "      sentiment_data_index = -1\n",
    "      if step > 0:\n",
    "        average_loss = average_loss / 500\n",
    "      # The average loss is an estimate of the loss over the last 500 batches.\n",
    "      print('Average loss at step %d: %f' % (step+1, average_loss))\n",
    "      average_loss = 0\n",
    "      \n",
    "      valid_accuracy = []\n",
    "      for vi in range(2):\n",
    "        batches_vdata, batch_vlabels = generate_sentiment_batch(batch_size, region_size,is_train=False)\n",
    "        \n",
    "        feed_dict = {}\n",
    "        for ri, batch in enumerate(batches_vdata):\n",
    "            feed_dict[valid_dataset[ri]] = batch\n",
    "        feed_dict.update({valid_labels : batch_vlabels})\n",
    "\n",
    "        batch_pred_classes, batch_preds = session.run([valid_pred_classes,tf_valid_predictions], feed_dict=feed_dict)\n",
    "        valid_accuracy.append(np.mean(batch_pred_classes==batch_vlabels)*100.0)\n",
    "        print(batch_pred_classes.reshape(-1))\n",
    "        print(batch_vlabels)\n",
    "        print()\n",
    "      print('Valid accuracy: %.5f'%np.mean(valid_accuracy))\n",
    "      reg_valid_ot.append(np.mean(valid_accuracy))\n",
    "      \n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Plot the Results\n",
    "Here we plot the accuracies for the standard sentiment classifier as well as the region embedding classifier."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAA4AAAAFICAYAAAABEJCnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs3Xd8luW9x/HPlb0HJGSHDQESCFOl4gBxoMW96zr16Dmn\n47RVa+uou9Xu09pqbW2rtmJddSBuceBghxH2DJkQQvZ8nuc6f9zhSUIYCSR5Mr7v14tXvO/7eu78\nEkm4f8/vuq6fsdYiIiIiIiIi/Z+frwMQERERERGRnqEEUEREREREZIBQAigiIiIiIjJAKAEUERER\nEREZIJQAioiIiIiIDBBKAEVERERERAYIJYAiIiIiIiIDhBJAERERERGRAUIJoIiIiIiIyAAR4OsA\nukJcXJwdNmyYr8MQERERERHxiZUrV5Zaa+OPNa5fJIDDhg1jxYoVvg5DRERERETEJ4wxuzsyTlNA\nRUREREREBgglgCIiIiIiIgOEEkAREREREZEBQgmgiIiIiIjIAKEEUEREREREZIBQAigiIiIiIjJA\nKAEUEREREREZIJQAioiIiIiIDBBKAEVERERERAaIAF8HICIiIiJyVI21ULwOinKcj656X0d0FAZG\nnQVZl4Ofai3S+ygBFBEREZHeo6G6Odlb4yR8hTlQuhmsx9eRddy6F2HF0zDvl5A00dfRiLShBFBE\nREREfKOhGorXOkmeN9nbAti244w/JGRCUjYkTYLQGJ+E2yE1pbDkN7BnKTx1Okz7Jsy+G0JjfR2Z\nCKAEUERERER6QkMVFK1tSfSKcqB0K+2SPb8AGDLOSfaSs52PCRMgMNQnYR+XydfCx4/C0j/B8j9D\n7qtw1v2Q/Q1NCxWfM9baY4/q5aZNm2ZXrFjh6zBEREREBKC+sn1lb/822id7gU6ydzDRS86GIRMg\nMMQnYXe5klxYdAfs/tw5TpkG834BKVN8G5f0S8aYldbaacccpwRQRERERI5bfYWzXq91sle2vf04\nv0BIGN++shcQ3PMx9yRrYd3L8N49UF0MGJh6I8z5CYQN8nV00o/0iQTQGPN94Gact4PWATcBTwKn\nAxXNw2601uYc7T5KAEVERER6QF15281ZinKgbEf7cf5BTnJ3cM1ecjYMGd//k72jqa+ETx6DpU+C\nx+WsCZxzH0y5Hvz8fR2d9AO9PgE0xqQAS4Dx1to6Y8yLwCLgDGChtfbljt5LCaCIiIhIF6s70L6y\nd2Bn+3H+wU6y13oaZ/w4CAjq+Zj7gr2b4O07YOenznHyZGe30NRjPreLHFVHE0BfbwITAIQaY5qA\nMKDQx/GIiIiIdA93E2x5Fza+AU21vo7myNwu2LsByne3v+YfDImZbadxDhkH/oE9H2dfNSQDrn8D\ncv8N794NhavhL3Ng8nXORjHhcb6OUA7VWOus5zz4RogBLvyDr6M6br6eAvq/wCNAHfCetfZaY8zf\ngVOABuBD4EfW2obDvPYW4BaA9PT0qbt3H+aXlIiIiIivHdgNq56F1f9oXgPWRwSEQGJW22mc8RlK\n9rpSQzV8+gv48g/gaYKQaJh9L0z7D00L9ZXG2uY+lK2mOe/bDNbdMiYwHH68p9f9P+oLU0BjgVeA\nK4Fy4CXgZZykrxgIAp4CtltrHzzavTQFVERERHoVdxNsfhtW/h22f4R398vBo2HKdRA7zIfBHYuB\nwSMhbiz4+3qy2ABRutXZLXTHYuc4cSKc/ytIm+HbuPq7xhon2WvTh3IzWE/bccbfefMjufnNkKRs\nZ8puH00AfflTfRaw01q7D8AY8yow01r7j+brDcaYvwG3+ypAERERkU4p29lS7avZ65zzD4bxFzo7\nPw6dCcb4NETpheJGw3X/ho1vwrt3OS00np4Lk66BuQ9AxBBfR9j3NVS3r+yVbjl8speQ2X632qAw\n38TdDXyZAOYBJxtjwnCmgM4BVhhjkq
y1RcYYA1wErPdhjCIiIiJH52qEzW/BymdaKjjgVNCm3QQT\nr9R2/3JsxsD4+TDqLPjsV/DF72DN87DpLTjzLph+syqyHdVQ3b4PZekW2vehDHD6TiY3V/WSJzvJ\nXmCoT8LuKb5eA/gAzhRQF7AapyXE20A8zvLKHOC/rLXVR7uPpoCKiIhIj9u/HVY9A6v/CbWlzrmA\nEJhwsVPtSztJ1T45fvu3w9t3wrb3neOETKeJ/NCZvo2rt6mvPExlbyuHT/bGtarsHUz2QnwSdnfo\n9WsAu5ISQBEREekRrgbYtNBZ23dwG39wetxNvREmXuH0dxPpCtbC5kXwzo+gPM85N/FKmPsgRCb6\nNjZfqK9s34dy/3baJ3uBTrLXujXJkP6V7B1OX1gDKCIiItI3lG6DVX+HnOehdr9zLiAUMi91Er/U\naar2SdczBjLOh5GzYclvYclvYO2/YNMiOONHcNKt/XdX1vqK9n0oy7a3H+cX2KoP5aSWNXsBwT0f\ncx+hCqCIiHQda2H9K7D+VfC4fB3NkfkHtp0KFJWih/ejsdapPrR+CIsd3vLueuyw/vn9a6pvqfbt\n+qzlfEKmk/RlXQ6hMb6KTgaisp3wzo9hy9vOcfw4Z1ro8Fm+jetE1ZW3r+yV7Wg/zj/ISe7a9KEc\nDwFBPR9zL6QpoCIi0rNKNjjbmO9e4utIOi8sruVh4mC/s+i0/pnUHIu1TgPw1u+6F62BurIjvyYk\npuX7dvB7OGhE3/3+7dvirO3Leb7l6w4Ma6723QQpU/ru1yb9w5Z3nfWBB3Y6x5mXwtkPQ1Syb+Pq\niLoD7St7B7+O1vyDW1X2mhO++HFK9o5CCaCIiPSM+gr4+DFY+qTTKDdsMJx+J8QM9XVkR9Z4yHbg\n9eXtx4QNbplOdPABJCa9fz34W+s8eB2a7B3x+9H8vRg82qkCHnxdzb7244OjIWliq4e3yU7V0M+v\n+7+u49FUBxvecKp9eV+0nE/McpK+rMshJMpn4Ym001QPX/ze2THUVQdBEXD6D+Gk/+49SVJtWfvK\n3oFd7cf5B0PiIa0Xhozrv9Nbu4kSQBER6V7WOmtR3rvX6Xdm/GDaN53tyvvSlvdtKl6tHlQOV/EK\nHdS20pWc7SS6fSEptNaZUtX6QaxojZPAH6p1RfTgx+jUw3+d1kJlYcv9Dt67uqT92OCo5qR6kpMQ\nJmU7lUJfJoV7NzrtG9YsaEl8A8Mh6zJnmmfy5L7x/1cGrgO7nd6BmxY6x3Fj4Lyfw8gzezaO2jIo\nXN32d8zBjWtaCwhx3lhp/QZbfIaSvS6gBFBERLpP8Tpnumfel85x6gw4/5fOP+j9gbVQseeQylhO\ny+YfrYXGtq8U+npNnMfTXNlr/TC2FhoOk+yFD2mf7EUln3j8lUVtv3eFOVBd3H5ccBQkTmw7fXTw\nqO5NChtrYcPrTrVvz1ct55MnO0lf5qUQHNl9n1+kO2z7ABb9sGWjlPEXwjk/dd686Wo1+6Fodauf\n7zVQcbhkL9RJ9lr/jokbq36G3UQJoIiIdL26clj8U1j+Z7AeCI93tiOfeFXvndrXVayFivz2Sc3B\n/m+thUS3Twq7a02cx9N2OmbRGudPQ2X7sREJbWNKzobIpJ5LVquK2yfVVUXtxwVFtE0Kk7Obk0L/\nE/v8JblO0rfmXy3JcFAkTLwcptzgfB6RvszVAF/+AT79BTTVOmtXT7sdTvn28e+KWVPa/PO6umWm\nRMWe9uMCw5ore61+x8SNUbLXg5QAiohI1/F4YM3z8P59TsJj/GDGLXDGjwf2LojWQmVB+6Smu9bE\neTywf9sh0zjXQmNV+7ERiYep7CUd/9faXapK2ifVVYXtxwWGO9+/Ng+Xo4+dFDbWQO6/ncQvf3nL\n+ZSpTrVvwiUQHNGVX5GI71Xkw7t3w4bXnONBI51poaPPOvrrqve2nw5fmd9+XGBY+zdp4sac+Js0\nck
KUAIqISNcozHGme+Yvc47TZzrbjidm+jau3spap6p1aFLY2TVxHreT7LW+T/FaZwObQ0UmH5Ls\nTerbTaKr97bfJfCwD6HhbaeXJU1qqTgUr3OSvrUvtlRDg6OcJtpTb3BeJ9LfbV8Mb/8QSrc4xxkX\nONNCY4d2/M2XQyvySZM69uaL9DglgCIicmJqy+Cjh2HFXwHrTB88+2FnN0RtitF5HV0TFxQJg4bB\n/h3QVNP+elRK+2mcEUO6PXyfq97XXJU4xjS0gFCn0tm6h1jqjOZq30UQFN5jIYv0Cq5GWPqEs1tz\nU43zMxIac+Tp14dOXx88Usles8LyOjYVVzI7I8HXoRyWEkARETk+Hg+sfhY+eMDZCdP4w8n/7bR2\n0Db4XetYa+KiUttP44yI9128vU1N6SFJdauNKIKjYdJVTrUvYYJv4xTpDSoLnV2b17/sHAdFtt/V\neNDI/r+e+zg0uNw8vWQnv/9wG/5+ho9uO50hUSG+DqsdJYAiItJ5BSvhrduhcJVzPGyWM91zyDjf\nxjWQVJU4O3gOHgXhcb6Opu+pLXOqf0PGQ1CYr6MR6X32b3emqvu6BUsf8emWfdz/Ri47Sp0ZGedl\nJnL//Akk9OEEUNvyiIiIs6X3hw/AqmcB6+wMefbDznb4mu7ZsyITnD9yfMIG9a0+lCI9bfBIX0fQ\nZzS43Pz41XUUlNcxIj6cB+ZPYNbovj8LQwmgiMhA5nE7G2V89BDUHQC/ADj5f+D0H6oPmoiIDDgN\nLjdujyUsKIDgAH/u+/p4tu+r4ZunDicooH9UTJUAiogMVHuWw6LbnM00AEacAef9AuLH+DIqERER\nn1i8aS8PvJnL3PEJ3H3+eADOntCHd1Q+AiWAIiIDTfU++OB+yPmHcxyVCuc8AuMv1HRPEREZcPaU\n1fLAmxv4YKPTrmfJtv00uT0E+vePit+hlACKiAwUbpfT0mHxw1BfAX6BMPM7cNrt2hpfREQGnPom\nN09+sp0nPt5Og8tDRHAA3ztrNDfMHNZvkz9QAigiMjDkfeXs7lmyzjkeOQfO+znEjfJtXCJdwOX2\nsG1fNesLKllfUMH2fdWkxoby/bljGBLZ+3bqE+lJ1lqsBT8/Z4bHHxZvo7S6gatnpDMmYeCu9T5Q\n08j8PyxhT1kdABdlJ3PXvHG9sr1DV1MCKCLSn1WVwAf3wZoFznF0Opz7M8g4X9M9pc9bsauMRxZt\nZGNRJfVNnnbXD67hAXho4QZqGlxkpkSTlRLN2MRIQgLV3Fr6F2ste8rqWFdQwbqCCtYXVLC+sIIn\nrp3KKSMHA+D2WP72+S7+9vkupg6N5eoZ6ZyflURo0MD6eYgNDyIjMYrwoAAemD+Bk0YM9nVIPUYJ\noIhIf+R2wbKn4OOfQUMl+AfD1/4XTv2+eqNJn9Hk9rC1pJr1zQ+z6woqOG1MPD+Y62xUFBzgz+q8\ncgDSBoWSlRJNZko0o+IjOFDbSERwy2POm2sK2VvVAMv3ABDgZxiTEElWSjQXTErqF1u7y8BV1+jm\n5meXs76gkoq6pnbXNxVXehPA+ZOSKa6s542cQlbuPsDK3Qd44M1cLp6cwrUnDWVsYv+sCtY1uvnj\nx9s4e3wiWanRAPzisolEBAcQ0I+nex6OEkARkf5m1+ew6HbYu8E5Hn0OnPeo0/RXpA/448fbeHd9\nMRuLq2h0ta3shbWqUoxNjOQf3zyJzJQoYsKCjng/ay2PXzPFWxFZ1zxNdENRJRuKKhk5JNybAK7Y\nVca/lu8hK9VJJscnRalSKD7n8Vh2l9W2VPUKKqhrcvPv//kaAKFB/mwtqaairom4iCBvpfvgx6To\nlmmNw+LC+enFWdw9bxwL1xayYNkecvaU8+yXu4kJDWRs4lhffZndwlrLu7klPLRwAwXldXy+rZRX\n/nsmxpij/t7oz5QAioj0F5VF8P69sO4l5zhmKJz3GIw9z7dxiRyi
0eVhS0lVm2lqf7hmCmmDnOr0\nrtIa1uRXADBscFibh9nM5GjvfYIC/Dh1dNwxP58xhhnDBzFjeEuD+JoGFxuKnDWDXxvVco8vt+/n\npZX5vLQyHwB/P8PoIRFkpkQzMTWa604eitH0aekhn2zZxxMfbyO3sJKqeleba8ZAdYPLW+l+8rqp\nJEWHkBgV0qG/o+HBAVw5PZ0rp6ezobCSF5bnccX0NO/1f3y1m03FlVw9I50JrX7u+pId+6q5741c\nPttaCsD4pCjumjduwP8MG2utr2M4YdOmTbMrVqzwdRgiIr7hboKlT8LHj0JjNQSEwKk/gK99FwJD\nfR2dCABlNY384t3NrC+oYHNxFY3utpW9P147hXlZSQDkFlZQUdfEhORookMDezTObXurWLK1lHXN\nG8ps3VuFp/lRKW1QKJ/9cLZ37EMLN5AWG0pWajTjk6IH3BoqOXEej2Xn/hqnMp3vvCFy0eQUrp6R\nDsB7ucXc8txKAIZEBrep6mWlRpPQTRuWWGuZ8+tP2LGvBoBJqdFcPSOdr09KJjy499ePahtdPP7R\nNv782Q6a3JaokABuP2cs1540FH+//pv8GWNWWmunHXOcEkARkT5sxyew6A4o3ewcjz0fzv0pxA7z\naVgyMDW43GwurvJW9YID/Ll//gTA2W59wn3v4m7OpkbEhbep7E1Mje6VD5Z1jW5vpdDPwHWnDAOc\nhHbKQ+97x/kZGNVcKcxKiebsCYmkxOgNGDm83324lSXbStlQWEl1Q9vK3iVTUvj1FdkAlNc2snL3\nAbJSont8d8rNxVUsWJbHq6vyqWyuPoYH+XPh5BT+42vDGDWk964VLKmsZ/YvP6am0c0V01L54bkZ\nxEUE+zqsbqcEUESkP6sogPfuhtx/O8eDRjhtHUbP9W1cMuB8sb2UN3IKWVdQwZaSKprcLc8VsWGB\nrLp3rne61Usr9pA2KIwJyVFEhvRsZa+rVdQ18XpOgbdqs3VvtTe5BXj2P2Zw2hhnXeGHG0vYtb+W\nrJRoJiRH9cpEV7qW22PZWVrtTHPOryS3sIK/3zTDWyW+/q/L+HTLPgASo0JaVfWiyEqJIT6y9yQr\n9U1uFq0rYsGyPJbvOgDAE9dO4bzmin1vsbO0hrTYUO+GLq/nFJA2KIwp6bE+jqzndDQB1G8gEZG+\noL4CitZCUQ4U5sDmt6GpBgJC4bTbYOZ3IcD3Dwwut4dnv9zNR5v2kj44zHmgSYlmTEIkQQEDa5e1\n/qS+yc3G5irY+oJKLp6SwsnNW6ZvLq7iheadNY2BkfHhbaapWdvSceTyaWlH+hR9TnRoINc3VwOh\n7fdoXUEFWSkta6ZeWZXPonXFgPO9GBHX8j2aNmwQ2WkxPR2+dIP6JjcL1xbxysp81uSXU9vobnN9\nQ1ElU4c6ych/nT6Cm2YOIzMlulcle4cTEujPJVNSuWRKKltLqnh1dQFnjU/wXr/ntXW43JarZ6Qz\nMTW6x9fXVTe4+P2HW3l6yU7uvWA8N8wcBsCF2Sk9GkdfogRQRKS3qSuHojUtyV5RDpTtaD9u3Hw4\n5xGISe/5GA+jvsnNxX/8go1Flc6JbS3Xgvz9ePjiTK5oTgCqG1wE+huCA7Rmqrf6x1e7WbOn/LDV\nrYSoYG8CeNqYeO69YDxZKdGMT45q03phIAkJ9GdyeiyTD1NtODczicjgQNYXOusft++rYfu+Gl7L\nKeTs8Qk8db3zhn1lfRMvLt9DZnOlsK9XSQeaXftruP2lNd7j5OiWyl5manSb9gozRx5786LeaHRC\nJHeem+E9rm5w8dKKfBpcHl5YvofxSVFcfVI6F2YnE9XNf3+ttby5tohH3tpASWUDxkBBeV23fs7+\nYmD+lhYR6S3qDjjJ3sFErzAHDuxsP84/GBImQHI2JGVD6nRIGN9+nA85D8AxVDc08Z3ZoymvbWR9\n80YaO0prSI5uWQ/1zBe7+O0H
WxibGNmmWjQ2MVJJYQ9y1rc5Vb2dpTXe9XoAf/t8J9ubN4DwMzAm\noWV9W+uH15HxEYyMj+jx2PuS+ZOSmT8pGWi/TrJ1wrg+v4KH39roPT50neSUoTH6+eglahtdLFxb\nRM6ecn56cRYAGYlRXD0jjey0GOaMSxgQa84iggN467uzeGFZHq+symdDUSX3vraen761kQsmJvGd\n2aNJH9z1vWe3lFRx3+u5fLljPwCT0mJ4cP4EJqma3iFaAygi0lNqy9pX9g7saj/OPxgSM51E72DC\nN2Qc+PeuakCT28PfPt/JuKQobw+1qvomAv392vVNq6pvIijAz/vwet/r63n2q90c+k9QoL/h9DHx\n/OWG6d5zjS6Ppo92kaKKOt5ZX+xNPrbtraZVYY+ld83x7ir4z6W7aXJ5yEqNZlxSFGFBes+4u20o\nrOS5r3YfcafUZXfPYUik8//nw40lhAb5k5kS3e2VFmmRW1jBC8v28NrqAqqaN295+39nMS4pyseR\n+V6Dy827uSUsWJrnTcwW334Gw+PCAWfHU78u2IFz5e4DXPGnL3F7LLFhgdx5bgZXTEvrknv3dVoD\nKCLiS7VlbRO9whwo391+XEAIJGS2JHrJ2RCf0euSvUN9sa2Un7yRy7a91QyPC+e9759GoL/fEaes\nHXr+gQszuePcDHJb9YFb11wpDPBrSfbKaxuZ/sgHjB4S6Z1GlZUSTUZipJpzH8XBHnfr8itIjQ3l\n7AmJAOwqreWBNzd4x/n7GTISIrxbyge3SrSvPWloj8c90I1PjuJnlzjVpNa9EtcXVFBQXudN/gAe\neWsjO0qdCm27XokpPd8+oz+rb3Lz2uoCFizL8/anBJiSHsNVM9IZ2g0Vrr4oOMDfW+3esa+az7fv\n9yZ/1louffILRsRFcM1JaUxJjz3utYLZaTHOzsEp0dx29pgB28z9RKgCKCJyomr2Q9HqVsneGqjI\naz8uIBQSs5qTvUlOwhefAf595724ooo6Hn5rI2+tLQKcB8/75k/gzLFDuuT+1Q0uquqbSGqeLrpi\nVxmX/+nLdpXCAD/D6IRIfntltnddjbV2wDb3XZtfzvJdB7yJ9PZ91d7v2dzxCfy5eY1ZVX0TDy3c\n4E0UxiVFKZHugzwey31v5LI2v5yNxVU0utpWCr87ZzQ/mDsGcNZE7dxXQ2ZKlB6Uj1N1g4uTHvmA\nmkY3kSEBXDollatmpJGRqKpfR20tqWLubz71Ho9JiODqGelcMjmV6LCjv1mxsaiSR9/exKOXZnn/\nbdDMkMPrE20gjDHfB24GLLAOuAlIAl4ABgMrgeustY1Hu48SQBHpMTWlzYnewYRvDVTsaT8uMMxJ\n9lpP44wb06eSvdYaXR6eXrKT33+0ldpGNyGBfnxn9mhunjW829ckta5mtU5wPBZW3nMWg5vX2Xz/\nXzlsLKpsUwkZnxTVr5pzVze4vFXTy6ameh/ov/XPVby1rsg7LsDPMCbBqZrOHDVYu+H1Y01up1KY\nW1DptBwoqOB/zxrtfVPmr0t28uBCp+qbNii0zZrbzORoYsOVFLZW3eDijZxCFq4t5K83Tve+QfKX\nz3YQGxbEvKykfvU7pSft3l/DgmV7eHnlHkqrnUf74AA/zs9K4sfzxrXbDbWironfvL+FZ7/chcfC\nVdPTePTSiT6IvO/o9QmgMSYFWAKMt9bWGWNeBBYB84BXrbUvGGOeBNZYa5842r2UAIpIt6je134a\nZ2V++3GBYZA4se00zrgx4Nd/HhKq6puY/atP2FfVwHmZidxzwXifNrmubXSxubiqzQYas3/1MTua\nNy05yM/A6CGRXDk9jf84dXhPh3lCGl0eVu5uqeqtL6xgZ2mNt7L395umc0bzQ/6rq/JZtrOszWY6\nquwJOH83nvtqNxsKK2k4pFI4ODyIFfec5a2cL99Vxsj4CAYNsKTQWsva/AoWLMvjjTWF3vYNv7
ly\nEhdPTvVxdP1Po8vDBxtLWLAsj8+2lhIdGsjSu+Z4f2fVN7l5c00hj72zidLqRvwMXH/KML4/d4ym\nNh9DX1kDGACEGmOagDCgCJgNXNN8/RngfuCoCaCISJcpWgMrn4Et70BlQfvrgeGQNPGQyt7ofpXs\nHVRQXsegsCBCg/yJDAnkZxdnERTg521u7UthQQHtttt/6zuz2NCqD9v65vYFm0uqqKxv8o5btrOM\nO19Z22a9W2uvfetr3geR215cQ25hxWHHzc4Ywg+bt0PP21/LLc8d+Y3IX1w2iaxUpy/cEx9v5/Wc\nw/zdAlJjw/jLDc6/3TUNLq7+81dtrgf6G+/Oqa13GDzYo0vkUAf/brjcHrbtq25TSR8SGeJN/mob\nXVz5py/xWEiJCfWuC52QHEVWSrS30t6fuD2W55flsWBpHhsOtq8BZgwfxDUz0jk3M9GH0fVfQQF+\nzMtKYl5WEnn7a9m2r8r7O7e6wUXmfe96x04fFssD8zMZn6zptl3JZwmgtbbAGPNLIA+oA97DmfJZ\nbq11NQ/LBw47b8UYcwtwC0B6eu/ogSUifVRDFax/BVb+HQpXt5wPimhZq3dw3d7gUf0y2WutweXm\nz5/u4PHF2/jPWSO47eyxAG0a//ZGoUH+TB0a6220DE6bg43FlcS3enhdm1/OztKaw90CoM16w7yy\nGjYVVx12XOtd/xrd7iOOA6hramkIvbeq/ohjm1rt+hgbHsRZ4xKIjwx2HsZTohmTGKE2AHJcAvz9\nyEiMIiMxisub+3G2VlbTyJT0WHILKykor6OgvI53cou91/9y/TTv74DC8jqCAvz6fJsDPwPPL81j\nY1ElMWGBXNa8tm/UkMhjv1i6RPrgsDZtIpbu2I8xMDg8mLvmZXDx5JQBu7a7O/lyCmgs8ApwJVAO\nvAS8DNxvrR3VPCYNeNtam3m0e2kKqIgcl8LVTtK37mVorHbOhcTApKsh+xpnd06/gbXIfPHmvTzw\nRi679tcCcOmUVH55+cR+9Q9wfZObvLLaNo3NWxubEOndTnxXaU2b5K21qNBA7zTY+ib3UZPKoYPD\nvG0USirrKas5/NL2oAA/9dQTn3J7LNv3VbeppOcWVvLu904jbZDzoH7HS2t4aWU+Sa0anR9cW3jo\nOq7eoqKuiddzCliwbA+PXzPZ+3P2wYYSahpdnDMhUdOme4mKuiZCA/21yctx6AtTQM8Cdlpr9wEY\nY14FvgbEGGMCmquAqcDh58mIiByP+kpY/7KT+BWtaTmfPhOm3gjj50Og79a2+cqesloeXLiB9zeU\nADB6SARKGuH7AAAgAElEQVQPzJ/AzFFxx3hl3xMS6M+YhI69wz+seQvzjtyzo33AEqJCvL32RHob\n/+YNhMYkRHqnFbs9ltYt1oyB8CB/iirqKaqo9/7eALhgYhKPXzMFcCraB2ob27Sv6EnWWlbllbNg\nWR4L1xZS3+RU2F9dlc8d5zjTt3v7zIaBSOv8up8vE8A84GRjTBjOFNA5wApgMXAZzk6gNwCv+yxC\nEekfrIWCVbDyb7D+VWhqrtSExsKka2DqDRA/1rcx+tCOfdWc93+f0eDyEBEcwPfOGs0NM4cR6K93\nX0XESQpb+/llk3j0konsKK3xVgrXFVSwobCS1NiW6XwbCiu58A+fkxDlTGOekBztXVvY3W+CLFiW\nx98/38Xmkpbp1l8bNZirpqdz9gQlfTKw+XIN4FJjzMvAKsAFrAaeAt4CXjDGPNx87mlfxSgifVx9\nBax90dnUpWRdy/lhs2DKDTDu6xCoSszwuHBOHjGY2LBA7po3jiGqTonIMfj5GUYNiWDUkAgumuxs\n1+Dx2DY7jZZU1hMRHEBJZQMllXv5YONe77X4yGDe+s6p3t831Q0uwoP8j3u6ubUWa/FO316+q4zN\nJVXERQRx2dQ0rpqe1uGKvkh/p0bwItK/WAv5K5wpnutfAV
edcz5ssLOub8oNzq6dA9ju/TU8/NZG\nbj97rLeJuprqikh38Hgsu/bXeNcTriuoILfA2XFzzX1nexO2r/9+CUUV9WSlRLX0KkyNJjEq5KhJ\n4YGaRl5dXcCCZXn8YO4Y5mUlAU7z8J2lNZw1LkG/22TA6AtrAEVEuk5deXO17++wN7fl/PDTnLV9\nGRdAQO/cnKCn1DW6eeLjbTz56Q4aXR6stfzlhukAekASkW7h52cYER/BiPgILsxuqRTurWrwJn9u\nj6Woop7S6gYWb97H4s37vK+PiwjiB3PHcs1J6d6xfgaW7izjhWV5LFpfTGNz1fGtdUXeBHBcUlSH\n1+WKDDRKAEWk77IW9ix1kr7cf4Or3jkfFgeTr3WqfYNH+jTE3sBay7u5JTy0cAMF5U5F9NIpqdx5\n3sBd9ygivuPnZ0iMbplq7u9nWH73HPLKar3rCdcXVLC+oJLS6kbCg1t253x55R7uf2ODd3deY+D0\nMfFcPSOdOeOG9PjXItIXKQEUkb6ntgzW/stJ/PZtajk/4kxnQ5ex50NAkM/C6032lNVy92vr+XSL\n8476+KQoHrxwAtOGDfJxZCIiLYwxDB0cztDB4VwwMRlw3rzKP1BHdFjLrpBbSqqpa3IzJDKYK6en\nccW0NG97ChHpGCWAItI3WAt5XzZX+14Dd4NzPnwITP4GTLkOBo3waYi9kTGwbOd+okICuP2csVx7\n0tB2O/qJiPRGxph2yd0954/j1tNHMCgsiADtVCxyXJQAikjvVrMf1iyAVc9A6ZbmkwZGznHW9o09\nD/zVM+ggay2fbNnHaaPj8fMzpMaG8cdrpzAxNYa4iIG9BlJE+j5jjM/6Cor0F0oARaT3sRZ2LXGq\nfRvfAHejcz4isaXaFzvMlxH2Stv2VnH/GxtYsq2URy/J4qoZzqYJszPU80pEREQcSgBFpPeoKYWc\n551q3/5tzScNjD7b2dBlzDmq9h1GdYOL33+4laeX7MTlsUSHBmpXTxERETksJYAi4lseD+z6rLna\n9yZ4mpzzkclOpW/yNyAm3ach9lbWWt5cW8Qjb22gpLIBY+DqGWnccU4Gg8K1CY6IiIi01+EE0BiT\nba3N6c5gRGQAqd4HOf90qn1lO5xzxg/GnOus7Rs1F/z1HtXRLFxbxHcXrAZgUmo0D16YyaS0GB9H\nJSIiIr1ZZ56uVhljVgJ/ARZYayu7KSYR6c9qSuHDB52pngerfVEpMOV6p9oXnerb+Hoxay07SmsY\nGR8BwLmZiZw8YhAXZadwxbQ0b1NlERERkSMx1tqODTTmAeAGIB2oBV4G/mKtXdJ94XXMtGnT7IoV\nK3wdhogcjccNK/4KHz0E9RWHVPvOAj//Y95ioCqvbeTVVQUsWJbHztIavvzxHOIjnR09rbUYo8RP\nRERkoDPGrLTWTjvWuA5XAK219xlj7gfOBr4JXAVcZ4zZhlMVfMZau/c44xWR/ixvKSy6DYrXOccj\nZ8N5P4e40b6Nqxez1rJ81wEWLMvjrXVFNLo8AMRFBLN9X7U3AVTyJyIiIp3RqQU21ikXvgu8a4wZ\nBFyPkww+BjxsjHkLJxl823a0tCgi/Vf1Xnj/PljzvHMcnQbn/BTGfd3pUC6H1eBy8/XfL2FLSTXg\nfKtOGxPPNTPSmDMugUA1PxYREZHjdNw7LFhry4DfGmOeAf4P+AZwEXAhkG+MedRa+0TXhCkifYrb\nBcv/Aot/Cg0V4B8EM78Ls26DoDBfR9frWGtZufsAU4fGYowhOMCf1NgwymubuGJaGldOTyNtkL5v\nIiIicuKOOwE0xszGqf5dDIQAOcBTQAPwbeBxY8xIa+3tXRGoiPQRuz6HRXfA3lzneNRcOO8xGDzS\nt3H1QvurG3hlVT4vLNvDjtIaFvznyZwycjAAj106kdiwQAJU7RMREZEu1KkE0BiTAtzU/GcYUAM8\nB/zZWtt6F5a/GWOeBm
4ElACKDARVxfDevbDuRec4Jh3OfQzGnqfpnq14PJYvd+xnwbI83s0tpsnt\nzJZPjArhQG2jd9zBNX4iIiIiXakzfQDfBs4C/IGVwKPA89bamiO85EOcRFFE+jN3Eyz9E3z8KDRW\ngX8wnPp9OPV7EBjq6+h6naue+oplu8oA8DMwJ2MIV89I54yx8ar2iYiISLfrTAVwJs4GL09Za1d3\nYPxi4OvHFZWI9A07P3Ome+7b6ByPOQ/O/RkMGu7buHoJj8fy+fZSslKiiQkLAmD68Fj2HKjlyulp\nXDEtjeQYJckiIiLSczrTBzDMWlvbzfEcF/UBFOlhlYXw3j2w/hXnOHa4s85vzDm+jauX2FtVz0sr\n8nlheR57yuq494LxfPNUJymubXQRHOCPv5q2i4iISBfq8j6AQKwx5iRr7eIjfMIzgc3W2sJO3FNE\n+hJXI3z1R/jk59BUAwGhzs6eM78DgSG+js6nPB7LZ9tKWbA0jw82luDyOG+upcSEEh7U0uQ+LOi4\n994SEREROWGdeRL5GTAGOPkI1x8GNgP/caJBiUgvtH0xvP1DKN3iHGdc4Ez3jEn3bVy9xA9ezOG1\nHOf9L38/wzkTErh6RjqzRser2iciIiK9RmcSwNOAp49y/W2cthAi0p9U5MO7d8GG153jQSNh3s9h\n1Fm+jcuH3B7Lp1v2kRIbypiESADOGp/AyrwDXDU9ncunpjIkamBXREVERKR36kwCmAgcbXpncfMY\nEekPXA3w5ePw6S+hqRYCw+C0O+CUb0HAwGxRUFRRx4vL8/nX8jwKK+q5bGoqv7x8EgDnZSYxLzMJ\nP1X7REREpBfrTAJYAYw4yvUROH0BRaSv2/qBM92zbLtzPP4iOOcRiE71bVw+4HJ7+HjzPhYsy2Px\n5r00L+1j6OAwxidFecdpmqeIiIj0BZ1JAL8AvmmM+bW1dn/rC8aYOJy1f190ZXAi0sMO7Hame25a\n6BzHjYHzfg4jz/RtXD70uw+38ruPtgEQ6G84b0Ii18xI55QRg1XtExERkT6nMwngo8BnwEpjzKNA\nTvP5bOBHwKDmMSL9WkVdE/e+tp6cPeWMSYgkKyWa78we1beTgaZ6+OJ38NmvwFUPgeFwxp1w0n9D\nQJCvo+tReyvrKattJCPRqe7Nz07hzbVFXDU9jUunphIXMTCnv4qIiEj/0OE+gADGmMuBPwORrU8D\nlcCt1tp/dW14HaM+gNJTNhdXcetzK9i1v6UlZkpMKJ//aLb3+EevrCUxOoSslGiyUqJ7/2YgW96F\nt++EAzud48zL4OyHICrZt3H1sCa3h2e+2MVvP9hKSkwob333VAL8/QCw1mJMH07wRUREpN/rjj6A\nWGtfMsa8B3wdGN18eguw0Fpb0fkwRfqOReuKuP2lNdQ2uhmXFMWDF06gsLyOBpfHO6aitokXlu9p\n87ohkcFkpUSTmRLN/OxkRsZH9HToh1e2E975EWx5xzmOHwfzfgHDZ/k2Lh/4cvt+7ntjPVtKqgFI\nGxRGdYOLmDCn+qnkT0RERPqLTnckbk70/tENsYj0Wm6P5YmPt1Pb6Oai7GR+dslEQls19z4oMMDw\nf1dls76ggnUFFeQWVLK3qoEPN+3lw017yU6L8SaA7+YWk1tQQWZKNBNTY0iICu6ZRKOpDpb8Bpb8\nFtwNEBQJZ/4YZtwC/oHd//l7keKKeh5ZtJE31zgbHA8dHMb9X5/AmRlDfByZiIiISPfodAIoMhD5\n+xme+MYUPty4l+tPGXrERC0sKIALs1O4MDsFAI/HsruslnUFFawvqGBiarR37Ftri3hjTUtnlbiI\nYDJToshKiWbG8EHMGh3ftV+EtbB5kVP1K89zzk28EuY+CJEDr4OLy+3hsie/IP9AHSGBfnz7zFHc\nPGsEIYHtE3sRERGR/qKzawBTgW8DJwGxgN8hQ6y1dlLXhdcxWgMo3WFDYSUvr8zn3gvG
dUtl7pMt\n+/hie6lTLcyvoLLe5b02J2MIT984HYDqBhdPfrydzJRoslKjSY4O6Xw8+7c76/y2ve8cJ2Q60z2H\nzuyqL6fPaL2eb8GyPD7ZvI97LhhHamyYjyMTEREROX5dvgbQGJMBfA5EA7tw+v7tAOJxNoXJA/Z1\n4n5jgdabxowAfgLEAP/Z6l53WWsXdfS+Il3h9ZwC7nxlLfVNHsYmRnDl9PQu/xynj4nn9DFOlc9a\ny56yOtY1Tx0dk9CyTjC3oILHF2/zHg8KD3KSwZQoMpOjOW1MPOHBR/hRbqxxdvb84vfgboTgaJh9\nN0z7JvgPrAkAheV1PPzWBiYkR/OtM0cBcNX0NK6e0fX/b0VERER6qw5XAI0xLwJzgTOAAmAvcBaw\nGPge8ENglrV225HucZR7+zff8yTgJqDaWvvLjr5eFUDpKi63h8fe2cSfP3N2xLx0SiqPXJzp02mB\nO/ZV89LKfO+6wvLapjbXv/zxbJKiQwFnWqkxkJUcRWrx+5h374bKfGdg9rVw1v0QMbDWtzW43Pzl\ns508/tE26prcDA4P4vMfzdZUTxEREelXumMX0NOBp6y1a4wxgw9+HutkkL8xxswAHgMu7Xy4zAG2\nW2t3a7c98ZWymka+/fwqvti+nwA/w0++Pp7rTj7yer+eMiI+gjvPzQCcSmH+gTpyC51kcFdpLYmt\n2kz834dbcO3dwv0Bz5Dmvw6AxvhMgub/BtJm+CR+X/p4814eeHMDO0trADh/YhL3nD9OyZ+IiIgM\nWJ1JAKNxWj4ANDZ/DG91/VPgoeOM4ypgQavjbxtjrgdWALdZaw8c+gJjzC3ALQDp6ZrCJSdm9/4a\nrvnzUgrK64iLCOKP105lxvBBvg6rHWMMaYPCSBsUxrmZSW2u2YYqHol4mckVzxOAiwobxi9cV/L8\nnjnMfNfy/bllTB3a+76m7lDT4OL7/8rhvQ0lAIwaEsED8yfwtVFxPo5MRERExLcO3cTlaPbirPfD\nWlsF1AIjW12PAII6G4AxJgiYD7zUfOqJ5vtmA0XArw73OmvtU9baadbaafHxXbxbogw4idEhxEUG\nk50Ww8LvzOqVyd8RWQvrX8E8PoPpBc8SgBs7+Xp2XP0ptZNuJDAggCXbSqlpcHtf0uT2HOWGfV9Y\nkD/ldU2EB/lz17wMFn13lpI/ERERETq3BnAhUGutvaL5+F2cZvCX4SSSLwFF1tpObStojLkQ+Ja1\n9uzDXBuG02Q+82j30BpAOR5Nbg+NLo93A5XS6gYiQwIIDuhD0wP3boK374CdnzrHyZNh3q8gdap3\nSEVtE4vWF3HltDT8/JzprLc+t4KqehdXz0jn7AkJfetrPoKPNpUwPC6C4XHOxITd+2sICfQnodUU\nWREREZH+qjvWAL4IfNcYE2qtrcPZsXMxsLz5ehNwc6cjhatpNf3TGJNkrS1qPrwYWH8c9xQ5qn1V\nDXzr+VVEhQTy1HVT8fMzxEUE+zqsjmuogo8fhaVPgscFobEw5z6Ycj34tU3mosMC2+x0WdPg4ott\n+6lqcPHF9v0MCg/isqmpXDU9jRHxEYd+pl4vb38tD7yZy4eb9nL6mHj+ftN0jDEMHRx+7BeLiIiI\nDDCd6gPY7sXGjAYuB9zAm9baDZ18fThO+4gR1tqK5nPP4Uz/tDjtJm5tlRAeliqA0hk5e8r5r+dW\nUlxZz5DIYF79n5l9pwectbDuZXjvHqguBgxMuwlm3wthHZ+2WlHXxOs5BTy/NI9NxVXe8yePGMT9\n8yeQkRjVDcF3rfomN3/8eDtPfrKdRpeHyOAAvjd3DDfNHOatdIqIiIgMFB2tAHYoATTGBAJZQKm1\nNq8L4utSSgClo15cvod7XltPo9vD1KGxPHHtFIb0lSmCJbmw6A7Y/blznDINzv+lM+3zOFlrWZNf\nwYKlebyxppAGl5svfjSHxGjne1JV30RkSGBXRN9l
rLW8t6GEhxZuIP9AHQCXTE7hR/MyGBLZR/5f\nioiIiHSxrp4CaoBlwO3Ab08kMBFfaHR5eHBhLv/4ynn/4hsnp/OTCyYQFNCZfZB8pL4CFv8Mlj0F\n1g1hg+GsB5y+fn4nFr8xhuy0GLLTYrjngnEs31XmTf48HssFv19CQlQIV89I47zMpF7RPqG4sp7v\nPL+aRreHjMRIHrook+nD+tCmPSIiIiI+1KEE0FrbaIwpAfr31oHSbz375S7+8VUeQf5+PHxRJldM\nT/N1SMdmLax5Ad7/CdTsBeMHM26BM+9y1vx1sciQQGZnJHiPd5TWsK+qgd37a1m2s4z739jAJVNS\nuHpGOmMSIrv88x9NXaObkEA/jDEkRYfyvbmjCQv05xsnDyXAvw8k8SIiIiK9RGd2AX0cmGCtPbN7\nQ+o8TQGVY2l0efjBizncPGsE2Wkxvg7n2IrWOtM993zlHKedDPN+AUkTezSM6gYXb+QUsmBZHusK\nKrznpw6N5Q/XTPFWC7uLtZZ31hfz0MIN3HleBhdmp3Tr5xMRERHpq7p0DWDzDUcArwD5wK+BrTi9\nANuw1pZ1LtQTpwRQDuffq/OZPTaB6LDetYbtqOoOwEePwIqnwXogfAjMfRAmXQXGtxubrC+oYMGy\nPF7PKSQ82J/P75ztrb4VVdSRFB3apZ9v295qHngzl8+2lgIwO2MIf71xepd+DhEREZH+ojsSQA/O\nzpym+ePhWGttZ1pLdAklgNJag8vN/W/ksmDZHs4YG8/fbnTaAvRqHg/k/BM+uB9qS8H4N0/3/DGE\nRPs6ujZqGlzsLK0hM8WJa391A6f87CMmpERx9Yx0LpiYRFjQ8f8aqGlw8buPtvLXJTtpcluiQwO5\n/ZyxXDMjHX/t7ikiIiJyWN3RB/DXHDnxE+kViivq+a9/rCRnTznBAX7Mn5Tc+5O/wtXw1u1Q0Pwm\nxtCvOdM9Eyb4Nq4jCA8O8CZ/AJuKqwgO8GN1Xjmr88p56M0NXDTZWSs4Prlz7SS2lFRx/dPLKK6s\nxxi4anoad5wzlsF9qUejiIiISC92Qn0AewtVAAVg2c4y/uefqyitbiAlJpQ/XTe1TaLS69SWwUcP\nwYq/ARYiEuHshyHrMp9P9+ys2kYXC9cWsWBZHqvzyr3ns9Ni+NetJxMc0LHdQxtcbs75zadEhQby\n4IWZfWO9poiIiEgv0B0VQJFe69kvd/HgmxtweSwzRw7m8WumMCg8yNdhHZ7HDauehQ8fhLoy8AuA\nk/4LTr8TQnp/A/bDCQsK4IppaVwxLY1NxZW8sGwPr6zKJzIkwJv8WWvZXFLVpsl8VX0TT36ynZtP\nHUFseBDBAf788z9PJjEqRNM9RURERLpBhxNAY8yUjoyz1q46/nBEjs+u0lpcHst/zhrOnedm9N7W\nAPkrYdFtzrRPgGGzYN4vYUiGb+PqQhmJUdw/fwJ3npvB/poG7/mVuw9w2ZNfkpUSzTUnpRMc4Mej\nb29ib1UD5bVNPHJxFgApMV27mYyIiIiItOhMBXAFHVsD6PtO0TIgWGu96/t+PC+DM8bGc9qYeB9H\ndQQ1pfDhA7DqOcBCZDKc8whMuLjPTffsqNAgf1KDwrzHBeV1RIcGsq6ggh+/us57Pjsthqump/si\nRBEREZEBpzMJ4HdpnwAGACOBa4HNwD+7KC6Ro/pieymPvbOZv984ndjwIAL9/Xpn8udxw4q/wkcP\nQ305+AXCKd+C0+6A4AhfR9ejLsxO4ZwJiby9vogFy/ZQVtPILbNGcNnUVPw03VNERESkR3Q4AbTW\nPn6ka8aYR4CVQGlXBCVyJNZa/vr5Ln66aCNuj+Wvn+/ktrPH+jqsw9uzDN66DYrXOscjznR294wb\n7du4fCgk0J+LJ6dy8eRUX4ciIiIiMiB1ySYw1toSY8xTwF3Av7riniKHqmt086NX1/J6TiEA/3PG\nSL531hgfR3UY
1fvgg/ucvn4AUalw7k9h3Px+O91TRERERPqGrtwFdB/QC5/GpT/YU1bLrc+tZENR\nJWFB/vzy8knMy0rydVhtuV2w4mn46BFoqAD/IJj5XZj1AwgK93V0IiIiIiJdkwAaYwKAq3CSQJEu\nVVbTyPzHl3Cgtolhg8N46vppjEmI9HVYbe3+AhbdASXrneNRc+G8x2DwSN/GJSIiIiLSSmfaQPzu\nCJcGAbOANODerghKpLVB4UFcMT2NLcVV/PaqyUSHBvo6pBZVxfD+T2Bt88znmHQ491EYO0/TPUVE\nRESk1+lMBfDbRzhfD2wDHrHWPnXiIYlATYOLoop6Rg1xdsr84TkZGOg9u0W6m2DZU7D4Z9BYBf7B\ncOr34NTvQ6D62ImIiIhI79SZBPBwc+6stba2q4IRAdhVWsOtz62ksr6JN79zKnERwfj7MvHzeGD/\nNijKgcIc52PRWifxAxhznrPJy6ARvotRRERERKQDOtMGoqY7AxEBWLx5L/+7YDWV9S5GxIdTXe8i\nLiK45wLwuKF0a6tkb43TxqGxuv3YuDFw9sMw5pyei09ERERE5AR0Zg3gBGC6tfbvR7h+I7DMWruh\na0KTgcRayx8Wb+NX72/BWpg7PoFfXzGJyJBuXO/ncUPplpaqXmEOFK+DpsO81xGVAknZkJztfEya\nBJEJ3RebiIiIiEg36MwU0AeBKODvR7h+NTAPuOIEY5IBprrBxe0vruGd3GKMgR/MHcO3zxzVtev9\n3C4n2Ws9jbN4HTQdZgZzVGpLopfcnOxFDOm6WEREREREfKQzCeBJwB+Ocv0j4FsnFo4MRCt2lfFO\nbjGRIQH831XZzM44wcqa2wWlm9tX9lx17cdGpzkJXnI2JE1uTvbiT+zzi4iIiIj0Up1JAOM5ep+/\nMkBlEum0M8YO4aGLMjl1VBzD4zrZMN3dBPs2OWv1vJW99UdI9tIheVKryt5kCB/cNV+EiIiIiEgf\n0JkEsBTIOMr1DKD8xMKRgcDjsfz+o22cOjqOqUNjAbju5KHHfqG7CfZubDuNsyQXXPXtx8YMPWQa\nZzaEDerir0REREREpG/pTAK4GPhPY8wT1trtrS8YY0YCNwOLujI46X8q65v4wb9y+GDjXl5Ynsfi\n288gJNC//UC3C/bmtp3GWZIL7ob2Y2OHtd+gRcmeiIiIiEg7nUkAHwYuBFYbY/4I5DSfzwb+G/AH\nHura8KQ/2VpSxa3PrWRHaQ3RoYH87JKswyd/DVXwzHwoXNX+Wuzw9hu0hMZ2f/AiIiIiIv1AZ/oA\nbjLGnAc8A/wQsM2XDLATuFEtIORI3llfxG0vrqGm0U1GYiRPXTeN9MFh7Qd63PDKzU7yFxYHw09r\nW9kLjen54EVERERE+onOVACx1i4xxowBTgFGN5/eAnxlrXV3dXDSP/xh8TZ+8e5mAL4+KZnHLs0i\nLOgIf/U+uA+2vAMhMfDN92DwyB6MVERERESkf+tUAgjQnOgtaf4jckzjkiIJ9DfceW4G3zx1OMYc\nob/fqufgi9+DXwBc+ZySPxERERGRLubX0YHGmFONMfce5fo9xpiZXROW9HWV9U3e/56dkcDHd5zJ\nzbNGHDn527UEFn7f+e/zf+VM/RQRERERkS7V4QQQuBuYcpTrk4G7Tiwc6Q8Wri3ka49+xNId+73n\nUmJCj/yCsh3wr+vA0wQnfwum3tj9QYqIiIiIDECdSQCzgS+Ocv0Ljp4gtmGMGWuMyWn1p9IY8z1j\nzCBjzPvGmK3NH7XFYx/hcnv42dsb+fbzq6mqd/FubsmxX1RfAc9fBXVlMPpsOFsbyYqIiIiIdJfO\nJICxQOVRrlcDHW6+Zq3dbK3NttZmA1OBWuDfwI+AD621o4EPm4+llztQ08iNf1vOnz7Zgb+f4b6v\nj+feC8Yd/UVuF7x0E5RuhvhxcOnT4HeYthAiIiIiItIlOrMJTBFOFfBIsoF9xx
nHHGC7tXa3MeZC\n4Izm888AHwN3Hud9pQfkFlZw63MryT9Qx+DwIP5w7RROHjH42C98727Y/iGEDYZrXoCQqO4PVkRE\nRERkAOtMBfAd4KbDbfRijDkFuKl5zPG4CljQ/N8J1tqi5v8uBhIO9wJjzC3GmBXGmBX79h1v3ikn\nqsHl5uZnVpB/oI6JqdG8+Z1TO5b8LX8alj4JfoFw5T8hdli3xyoiIiIiMtAZa+2xRwHGmBRgNc5U\n0FeAnOZL2cClwAFgqrV2T6cCMCYIKAQmWGtLjDHl1tqYVtcPWGuPug5w2rRpdsWKFZ35tNKFPtu6\nj4VrinjgwgmEBHZgCuf2xfCPS8G64aInIPua7g9SRERERKQfM8astNZOO9a4Dk8BtdYWGGNOBf4C\nXNH856BPgVs7m/w1Ow9YZa09uGNIiTEmyVpbZIxJAvYexz2lG5VWN7B0RxnnT0wCYNboeGaNju/g\ni7fBSzc4yd/XvqfkT0RERESkB3WqEby1dgtwmjEmFRjdfHqLtbbgBGK4mpbpnwBvADcAjzZ/fP0E\n7qaBjHEAACAASURBVC1dbG1+Of/13EqKK+uJDTuJmaPiOv7i2jJ4/gpn58+x58Oc+7ovUBERERER\naadTCeBB1tp8IL/1OWOMHzDPWruwo/cxxoQDc4FbW51+FHjRGPNNYDdtK43iQy+t2MPdr62n0eVh\ncnoMI4dEdPzF7ian8le2HRKy4JKnwK8zS1BFREREROREHVcC+P/t3Xl4VeXV9/HvIkCCSgIhzLMQ\nQQTEgIIiBFTU4sCkVhSqVEStj/JoXx9Q61A7qKWWVlAUVEBRsBWkRdtqaolaB1pERAQJIKAoIGGe\np6z3j3NymuEk5ECSnZDf57pyZZ9733ufdTabnazcU15mlgr8GPgR0Ago8Tz+7r4HqFegbAuhWUGl\ngjh4OIdfvrmMFz9aB8B13Vvw0BUdiK9ewn9qd/jrPbDmPTi5AQydCfExJI8iIiIiIlIqjikBNLOT\nCLXM/RjoCRiwAphaeqFJRfD9rv3c/vIi/rN2GzXjqvHzAWcw9JwWsZ1kwbPwyVSIi4drX4E6zcsm\nWBERERERKVZMCaCZ9QBuIpT81Qac0Fp9v3X3ZaUfngTt0BFn9eY9NEyMZ9KwrqS1KHZC1sJW/gPe\nuje0PfBpaH526QcpIiIiIiIlctQE0MzqE+re+WOgPbALeJXQzJ8vAm8o+TvxuDtmRtM6tXjhxrNp\nUieBBrUTYjvJ91/CayPAc6D3/0Gnq8omWBERERERKZFiE0Azex3oT2hc3zvAL4HX3X2/mbUph/ik\nnB04fISH/7KMZnVrcXvftgB0aV7nKEdFsWdLaMbPAzuhw0Doc28pRyoiIiIiIrE6WgvgAGAVcI27\nLz5KXankNu3cz60zPuHTr7dTq0YcV3drFnurH8Dhg/DqMNi+Dhp3CS32rhk/RUREREQCd7Tfyv8G\ntAYWmNnrZjbQzI575lCpeBau3crlE/7Fp19vp0lSAq/e0uPYkj93eOMu+PpDqN04NONnzZNKP2AR\nEREREYlZscmcu19mZk2AEeGvOcAWM5sJvF8O8UkZc3dmfLyOn89bxuEcp8epyUy8Lo2UU+KP7YQf\nToDFM6B6rVDyl9ikdAMWEREREZFjdtR+ee7+nbv/yt3bElqf7++EZgKdRWgW0EvMrG3Zhill5dn3\nvuKBP3/B4RznpvNbM+Om7see/K34G2Q8GNoe9Aw0Oav0AhURERERkeMWU3dOd58PzDez24HrCc0M\nejMw0syWArPd/ZHSD7PycXeyNu0ucn/DxHjqnFQTgO17D7Jp54Ei67ZrVDuyvTZ7DwcO50Stl1Sr\nBo2SQt029x86wrote4s8Z8t6J5FQI46BXZry8oJ1/LRfOwae1bTYz1SsjUth9kjA4YKfwRkDj/1c\nIiIiIiJSJszdj+8EZp2AkcAwoI67x5VGYL
Ho1q2bL1y4sLzftljuTut7/1rk/kcHd4osqD7j43X8\nbO7SIuuufeyyyPZlT77PF9/tjFrvuu4t+PWgTgAsWb+dKyd+UOQ537jjfDo2TQLg4OEcalY/jkla\ndn8PUy6AHd9Ap6th8BQwO/bziYiIiIhITMzsE3fvdrR6xz2hi7t/Dow2s3sANfvkcVrDU4rcl1Sr\nRr7t4urm1bLeSRw6Er0FsGGeSVviq8cVe874PAnfcSV/h/bDrOtDyV+zs+HKiUr+REREREQqqONu\nAawIKmILYJXgDnNGwed/hMRmMGo+nNIg6KhERERERKqckrYAanE2OXbvPxFK/mqcDNfNUvInIiIi\nIlLBKQGUY7Psz/DPXwAGQ56DRp2CjkhERERERI5CCaDE7rvFMOeW0Ha/n0P7/sHGIyIiIiIiJaIE\nUGKzcwPMHAqH90GXYXDenUFHJCIiIiIiJVTiBNDM0sysTjH7k8wsrXTCkgrp4F6YNRR2fQctzoPL\nf6cZP0VEREREKpFYWgD/AxTX1+/ScB05EeXkwNzb4LtPoU5L+OFLUD0+6KhERERERCQGsSSAR2vq\niQMq/5oSEt27j8OyuRCfCNe9CienBB2RiIiIiIjEKNYxgMUleF2BrccRi1RUn78G7z4GVg2uegEa\nnB50RCIiIiIicgyqF7fTzG4DbstT9JiZ3RulajLQGJhRirFJRbB+Icz9SWj7kl9Dar9g4xERERER\nkWNWbAIIHAYOhLe9wGvylGcBLwKPlmp0Eqwd60Mzfh45AF1HQPdbg45IRERERESOQ7EJoLtPAaYA\nmNlm4B53n1MegUnADuyGV66FPd9D697Qf5xm/BQRERERqeSO1gIY4e71yzIQqUBycuD1W2DT55Dc\nBq6eDnE1go5KRERERESOUyzrANY2s+YFypqY2aNm9qyZ9S798CQQ/3wEvnwDEpJCM36elBx0RCIi\nIiIiUgpK3AIITAQ6AWkAZlYL+ABoGd4/wszS3f2j0g1RytXimfCv8WBxoZa/lNSgIxIRERERkVIS\nyzIQ5wFv5Hl9DaHk7xrgNOArYEzphSbl7uuPYd6doe3+46BN32DjERERERGRUhVLAtgI+DrP6/7A\np+7+mruvAl4AupVmcFKOtq2DWdfDkYNwzi1w9k1BRyQiIiIiIqUslgTwCFAzz+t0IDPP62wgpRRi\nkvK2fyfMvBb2ZkObC0Pr/YmIiIiIyAknlgRwNTAAwMwuAeoD/8yzvxmwrfRCk3KRcwRmj4Tvl0FK\nO7h6KsTFMjRUREREREQqi1gSwGeAfmb2HTAX+AbIyLO/J/BFLG9uZnXM7DUz+9LMlpvZuWb2sJl9\na2aLw1/9YzmnxCjjQVj5FtRKhutmhWb+FBERERGRE1Is6wBOMbPqwEBgB/Bzdz8IYGb1CE0I82SM\n7/8H4O/ufpWZ1QROAi4Bxrv7b2M8l8Tqk+nw0USoVgN+OAOSTw06IhERERERKUMx9fVz90nApCjl\nW4D2sZzLzJKA3sCN4XMcBA6aWSynkWO15n148+7Q9uW/g1Y9g41HRERERETKXCxdQCPMrJGZnWlm\nJx/He7cGNgNTzexTM3suz/n+x8yWmNkLZlb3ON5DojmwG14bATmH4dz/gbQfBR2RiIiIiIiUg5gS\nQDO7wMyWAN8Ci4Du4fIG4fF6V8ZwuuqEFpWf5O5nAXuAsYRaGNsAXYANwBNFxDLKzBaa2cLNmzfH\n8jHkP1Ngz2Zo2hX6PRJ0NCIiIiIiUk5KnACa2XnA38PH/BaI9NV09++BrcB1Mbz3emC9uy8Iv34N\nSHP3Te5+xN1zgCnAOdEOdvfJ7t7N3bvVr18/hret4g7shg/CQzX73gfV4oKNR0REREREyk0sLYAP\nA18CZwHjoux/nxgWgnf3jcA3ZtYuXHQhsMzMGuepNghYGkOMcjT/ngz7tkKzc0Jr/omIiIiISJUR\nyyQw3Q
nN/HnIzDzK/m+AxlHKi3MH8HJ4BtCvgBHAk2bWBXBgLXBLjOeUohzYBR9OCG33vRc04Y6I\niIiISJUSSwJYA9hbzP5k4HAsb+7uiyncajg8lnNIDHJb/5p3h1P7Bh2NiIiIiIiUs1i6gK4Azitm\n/w+Az48vHCkz+3f+t/Wvj1r/RERERESqolgSwOnAtWb2wzxlbmbVzezXhNb0e6FUo5PS8+/JsG8b\ntDgXTu0TdDQiIiIiIhKAWLqAPgmkAzOBTYTG6L0A1AdOAv7o7koAK6J8rX9j1fonIiIiIlJFlbgF\n0N1z3H0QcAPwGaFlHOKABcAId7+2bEKU47bgWdi/HVqcB63Tg45GREREREQCUmwLoJm1ADa7+77c\nMnd/CXiprAOTUrJ/B3w0MbSt1j8RERERkSrtaC2AawitxSeVVW7rX8ue0Lp30NGIiIiIiEiAjpYA\nqrmoMsvX+qeZP0VEREREqrpYZgGVyubjZ0JJYMvzoXWvoKMREREREZGAKQE8Ue3bDh89FdruMzbY\nWEREREREpEIoyTIQvcysxMtFuPuLxxGPlJYFz8CBHdCql1r/REREREQEKFkCOCr8dTRGaG1AJYBB\n27cdPno6tN3n3mBjERERERGRCqMkCeBk4OOyDkRK0ceTQq1/rXtDq55BRyMiIiIiIhVESRLA9939\nlTKPRErHvm3wsVr/RERERESkME0Cc6L56Gk4sBNap0PL84KORkREREREKhAlgCeSvVtDk7+AWv9E\nRERERKQQJYAnko/DrX+n9oGW5wYdjYiIiIiIVDDFjgF0dyWIlcXeraGF30GtfyIiIiIiEpUSvBPF\nR0/BwV1wal9o0SPoaEREREREpAIq8QLvUoHlHfvX975gYxERESlnBw4cYOvWrezatYsjR44EHY6I\nSKmIi4ujdu3aJCcnEx8fX2rnVQJ4IvhoIhzcDW0uhObnBB2NiIhIuTlw4ABff/01devWpVWrVtSo\nUQMzCzosEZHj4u4cOnSInTt38vXXX9OiRYtSSwLVBbSy27MFFjwb2tbYPxERqWK2bt1K3bp1SUlJ\noWbNmkr+ROSEYGbUrFmTlJQU6taty9atW0vt3EoAK7uPJoRa/9peBM3PDjoaERGRcrVr1y4SExOD\nDkNEpMwkJiaya9euUjufEsDKbM8WWDA5tK3WPxERqYKOHDlCjRo1gg5DRKTM1KhRo1THNysBrMw+\nfBIO7YG2/aBZt6CjERERCYS6fYrIiay0n3FKACurPdnw7ymhbbX+iYiIiIhICSgBrKxyW/9SL4Zm\nXYOORkREREREKgElgJXR7s15Wv/GBhuLiIiIiIhUGkoAK6MPn4RDeyH1Emiq1j8REZGqIDMzEzPL\n95WQkMCpp57KiBEjWL58eaFjcnJymD59OhdccAH16tUjPj6eFi1aMHz4cBYvXlyofuPGjWnevHnU\n92/fvj1mxnPPPVdo36xZszAznnjiieP/oCJSppQAVja7N8N/wg9etf6JiIhUOUOHDuWll17ipZde\nYsKECVx22WXMmjWL7t27s27duki9PXv2cOmll3LjjTeyd+9exo4dy9NPP83QoUN566236NatG5Mm\nTcp37j59+rB+/XpWrVqVr3zDhg2sWLGC6tWrk5mZWSim+fPnA9C3b9/S/8AiUqqqBx2AxOiD34da\n/077ATRNCzoaERERKWdpaWkMGzYsX1lqaiqjR49mzpw53HXXXQDceuutZGRkcN999/GrX/0qX/17\n7rmHCy+8kNtvv53U1FQuuugiIJTAzZo1i8zMTNq2bRupn5v0DR8+nLfffrtQTJmZmdSpU4cuXbqU\n5kcVkTKgFsDKZPf38J/nQ9t9xgQbi4iIiFQYTZo0AaBmzZoALFmyhBkzZtC9e3d++ctfFqqfkpLC\nK6+8AsDYsf/tUZTbgpfbopcrMzOT9u3bc8011/Dtt9+ycuXKyL4NGzaQ
lZVF7969qVZNv1qKVHSB\n/i81szpm9pqZfWlmy83sXDNLNrMMM1sZ/l43yBgrlA/+AIf3Qbv+0OSsoKMRERGRAOzdu5fs7Gyy\ns7P55ptv+Nvf/sb9999PSkoKQ4YMAWD27NkAjBw5ssg1xM444wzOPfdcPvnkk0jX0dTUVJo2bVqo\nm2dmZibp6en07NmzUDfQ3G11/xSpHILuAvoH4O/ufpWZ1QROAu4D3nH3x8xsLDAWUHPXrk15Wv80\n9k9ERORoWo19s8h9vx7Uieu6twDglQVfc9/rnxdZd+1jl0W2L5/wPku/3Rm13tBzmvPo4M4AfL5+\nB1dM/FeR5zkeDz30EA899FC+sg4dOvD+++/TqFEjAJYuXQqEuosWp2vXrnz44Yd8/vnntGzZEggl\ncjNmzCArK4vTTjst0sL385//nNq1a5OWlsb8+fO5+eabgf8mgH369CmVzyciZSuwFkAzSwJ6A88D\nuPtBd98ODACmh6tNBwYGE2EFE2n9uwwanxl0NCIiIhKQUaNGkZGRQUZGBvPmzePxxx8nOzub/v37\nR1rydu4MJalJSUnFnisxMRGAHTt2RMpyW/JyE7vc7+np6ZHvBVsAk5OT6dy583F/NhEpe0G2ALYG\nNgNTzexM4BNgNNDQ3TeE62wEGgYUX8WxayMs1Ng/ERGRWJS0xe267i0irYFH88YdvUpUr1OzpFJr\n8Sso76QtAJdffjnp6en06NGDMWPGMGvWrKiJXTTREsW84wBHjRpFZmYmqampNG7cGAglgOPGjWPF\nihUkJiaSlZXFwIEDNf5PpJII8n9qdSANmOTuZwF7CHX3jHB3BzzawWY2yswWmtnCzZs3l3mwgfrg\nD3B4P7S/XK1/IiIiUkj37t1JSkrin//8JwAdO3YEYNGiRcUel7u/U6dOkbLWrVvTsmVL3n33XeC/\n4/9ynX/++VSrVo3MzEyN/xOphIJMANcD6919Qfj1a4QSwk1m1hgg/P37aAe7+2R37+bu3erXr18u\nAQdi10ZY+EJoW2P/REREpAiHDx9m165dAAwePBiA559/ntDf0wtbtmwZH374IWlpaZHxf7n69u3L\nhg0bmD9/PllZWfkSwKSkJLp06cL8+fOVAIpUQoElgO6+EfjGzNqFiy4ElgF/AW4Il90A/DmA8CqO\nf40Ptf6dfgU06nT0+iIiIlLlZGRksGfPHrp27QrAmWeeydChQ/n44495+OGHC9XfunVrZC3Bxx57\nrND+3IQu99i8CWDu63fffZfMzExSUlIiLY4iUvEFPQvoHcDL4RlAvwJGEEpK/2hmNwHrgGsCjC9Y\nOzfAwqmh7XS1/omIiEio2+aMGTMAOHDgAF988QWTJ0+mRo0a+db8e/bZZ9m0aROPPPIIGRkZDB48\nmOTkZLKyspg6dSrZ2dk89dRT9OvXr9B75CaA7733Hq1bt6Z58+b59qenpzN+/Hg2btzIkCFDilxq\nQkQqnkATQHdfDHSLsuvC8o6lQvrXeDhyAE6/EhrpL2siIiICM2fOZObMmQBUq1aNevXqcfHFF3Pv\nvfdy9tlnR+rVrl2bt99+mxdffJHp06fz61//mt27d9OwYUMuvvhifvrTn9KlS5eo79G8eXPatGnD\n6tWrC7X+AfTq1Qszw921/INIJRN0C6AUZed38Mm00Ha6Zv4UERGp6vr06VPkeL6ixMXFMWLECEaM\nGBHz+61atarIfcnJyeTk5MR8ThEJnubrrahyW/86DFDrn4iIiIiIlAolgBXRjm/ztP5p7J+IiIiI\niJQOJYAV0b/Gw5GD0GEgNOwQdDQiIiIiInKCUAJY0exYD4umA6Z1/0REREREpFQpAaxo3v9dqPXv\njEHQ4PSgoxERERERkROIEsCKZMd6WPQiYJr5U0RERERESp0SwIrk/Scg5xB0HAwN2gcdjYiIiIiI\nnGCUAFYU27+BRS8BBr3/L+hoRERE
RETkBKQEsKKItP4NUeufiIiIiIiUCSWAFcH2r+HTGWjsn4iI\niIiIlCUlgBVBbutfp6ug/mlBRyMiIiIiIicoJYBB27Yu1Ppn1TT2T0REREREypQSwKC9/1vIOQwd\n1fonIiIiUtndeOONmFnQYZS7Pn360KpVq3J9z1iudWZmJmbGtGnTii2rCpQABmnbWlj8Sqj1T2P/\nREREpBi5v6zm/UpISODUU09lxIgRLF++POpxOTk5TJ8+nQsuuIB69eoRHx9PixYtGD58OIsXLy5U\nv3HjxjRv3jzqudq3b4+Z8dxzzxXaN2vWLMyMJ5544vg+6DHITQRyv+Li4mjQoAFXXHEF//rXv8o9\nnqBNmzat0L2S96tt27ZBhygBqh50AFXae+HWv87XQor+I4qIiMjRDR06lP79+wOwb98+lixZwnPP\nPcfs2bP5/PPPadmyZaTunj17GDRoEBkZGXTv3p2xY8eSnJxMVlYWU6dOZebMmUyYMIHbbrstckyf\nPn2YNWsWq1atypcobNiwgRUrVlC9enUyMzMZOXJkvrjmz58PQN++fcvy4xdr0qRJnHLKKRw8eJAv\nvviCyZMn8/e//5133nmH3r17l0sMU6ZM4ZlnnimX9zqaO++8k7PPPrtQee3atQOIpuLp3bs3+/bt\no0aNGkGHUq6UAAZl6xr4bGZ47N89QUcjIiIilURaWhrDhg3LV5aamsro0aOZM2cOd911V6T81ltv\nJSMjg/vuu49f/epX+Y655557uPDCC7n99ttJTU3loosuAkIJ3KxZs8jMzMyXAGZmZgIwfPhw3n77\n7UJxZWZmUqdOHbp06RLzZ5o2bRojRoxgzZo1x9WN8KqrriIlJSXyOj09nQEDBjBu3LhySwBr1KhR\nYRKKXr16cdVVVwUdRoVVrVo1EhISgg6j3KkLaFByx/51/qFa/0REROS4NGnSBICaNWtGypYsWcKM\nGTPo3r07v/zlLwsdk5KSwiuvvALA2LFjI+W5LXi5LXq5MjMzad++Pddccw3ffvstK1eujOzbsGED\nWVlZ9O7dm2rVKs6vlxdeeCFAvlhzuTuTJk2ia9eunHTSSZxyyin07du30OcG2Lt3L3fffTeNGzem\nVq1a9OjRg3feeSfqGLSixqUtWbKEQYMGUa9ePRISEujQoQO/+c1vOHLkSNTjd+zYwW233UaDBg1I\nSEigZ8+eLFiw4HguR1R5x8E9/fTTtGvXjoSEBDp16sQbb7wBwOeff86ll15KYmIi9erV48477+TQ\noUNRz/fVV18xYMAAkpKSSExMZNCgQXz11VeF6sVy/ffv388999xDkyZNqFWrFuecc07UP0Lk+vOf\n/8xZZ51FQkICzZs354EHHoga79HGBU6dOpUzzjiD+Ph4WrZsyW9+85uo7zdp0iTatWtHfHw8qamp\nTJw4MdINN/cPJwBbt27lrrvuok2bNiQkJFCvXj26du3KuHHjivwsZUEtgEHY+hUsngkWp9Y/ERER\nicnevXvJzs4GQl1Aly5dyv33309KSgpDhgyJ1Js9ezYAI0eOLHKijDPOOINzzz2XDz/8kHXr1tGy\nZUtSU1Np2rRpvl9cIfSLcd++fenZs2ekG2hqampkHwTb/TOa1atXA5CcnFxo3/Dhw5k5cyZXXXUV\nI0aM4MCBA7z88sv069ePOXPmcOWVV0bqXn311fz1r39l4MCBXHTRRaxZs4ZBgwbRunXrEsWxcOFC\n0tPTqVGjBrfffjuNGjVi3rx5jBkzhs8++4yXX3650DGXXHIJ9evX58EHH2TLli387ne/47LLLmPN\nmjUl7sK5a9euyL2SV61atTj55JPzlT311FNs27aNkSNHkpCQwJNPPsmgQYP405/+xM0338zQoUMZ\nOHAgb7/9NhMmTKBBgwb87Gc/y3eOPXv20KdPH7p3786jjz7KypUrefrpp/n444/59NNPadSoUaRu\n
LNd/6NChzJ07lyuuuIJLLrmE1atXM3jw4KjX//XXX2fIkCG0atWKBx98kOrVqzN16lTefPPNEl2z\nXM888wybNm3ipptuok6dOsyYMYMxY8bQrFkzrrvuuki9xx9/nLFjx5KWlsajjz7K3r17GTduHPXr\n1y90zquvvpr33nuPW2+9lc6dO7Nv3z6WL19OZmYm99xTjjmBu1f6r65du3ql8vpP3B9KdJ9za9CR\niIiIVGrLli2LvuOhxIr5dRzmz5/vQNSvDh06+PLly/PVHzx4sAP+ySefFHveO+64wwGfN29epGzY\nsGEO+IoVK9zd/bvvvnPAZ86c6e7u55xzjg8dOjRSf9SoUQ74p59+ekyfberUqQ74mjVrjun4G264\nIRLv5s2b/dtvv/WMjAzv3LmzA/7UU0/lqz9nzhwH/Nlnn81XfujQIe/atau3atXKc3Jy3N39zTff\ndMBHjhyZr25ueejX6cKx5HXeeed5XFycf/bZZ5GynJwcv/rqqx3wf/zjH4WOv+222/Kd449//KMD\n/swzzxz1euRez6K+br/99kjd3PuqSZMmvn379kj5Z5995oCbmc+ePTvf+dPS0rxRo0b5ytLT0x3w\n0aNH5yvPvda33HJLobKSXP+33nrLAb/hhhvy1X399dcLXf/Dhw978+bNvV69er558+ZI+fbt271F\nixYO+NSpUwt99mhljRs3znc99uzZ4ykpKd6jR49I2ZYtWzwhIcE7derk+/bti5Rv2LDBExMTHfD5\n8+dHYoj271pSRT7r8gAWeglyp4rTRl9VbFkdHvsXB73/X9DRiIiISCUzatQoMjIyyMjIYN68eTz+\n+ONkZ2fTv39/1q1bF6m3c+dOAJKSkoo9X2JiIgA7duyIlOW25OW27OV+T09Pj3zP20KYmZlJcnIy\nnTt3Pmr8hw4dIjs7O9/X7t27Adi2bVuhfbFo164d9evXp2nTpvTr149169Yxbtw4fvKTn+SrN2PG\nDGrXrs3AgQPzvdf27du54oorWLt2baTb6Lx58wC4++67852jf//+nH766UeN6fvvv+fDDz/kyiuv\nzHd9zIz7778fCLVaFZR3LCfABRdcAETvzlqUBx98MHKv5P264447CtW98cYb890rnTt3JjExkSZN\nmjB48OB8dc8//3w2btwY+XfLK293YoBBgwbRrl075s6dGymL5frnHlewhWzgwIG0a9cuX9knn3zC\nN998w4gRI/KNBU1KSuLWW28t9loVNGLEiHzX46STTqJHjx75rn9GRgb79+/ntttuyzeWsFGjRlx/\n/fX5zlerVi3i4+NZsGABa9eujSmW0qYuoOXtvd+CH4Eu10O9NkFHIyIicmJ6eMfR61RSeSdsAbj8\n8stJT0+nR48ejBkzhlmzZgHRE7tooiWKeccBjho1KtLds3HjxkAoARw3bhwrVqwgMTGRrKwsBg4c\nWKLxfx988EGRXUXT0tIKlYUaNkpm9uzZJCYmsmvXLubOncuMGTPYv39/oXrLly9n165dNGzYsMhz\nbdq0idNOO401a9ZQrVq1qEsntGvXrsjlN3KtWbMGCHW3Lej000+nWrVqUcfInXrqqfle16tXD4At\nW7YU+355derUKd+9UpyC7wdQt27dqEuC1K1bNxLLKaecEimvU6dOvm6euU4//XTmzp3Lnj17OPnk\nk2O6/l999RXVqlXjtNMKr5d9+umns2LFisjr3OvYvn37QnU7dOhQ5HtFE+161KtXL9/1z/236QNW\nVAAAE4BJREFULZiIRiurWbMmv//97xk9ejStW7emQ4cOXHDBBQwcODAyVrW8KAEsT1tWw5JX1fon\nIiIipap79+4kJSXxz3/+M1LWsWNH5syZw6JFi6ImVrkWLVoEhJKFXK1bt6Zly5a8++67QKiFL7f1\nD0ItQNWqVSMzMzOSaJZ0/N+ZZ55JRkZGvrK3336bcePGMWPGjG
KTgqPp3bt3pOVn0KBB1KpViwce\neICuXbvygx/8IFLP3alfv35kEpxoOnbsmO91eS/uHhcXF7U8loS4NN6vqPLjieVYrn95K+5zH6tb\nb72VAQMG8Oabb/Luu+/y2muvMXHiRH74wx9G/nBTHpQAlqf3xoVa/84aBsmF/6ogIiIicqwOHz7M\ngQMHIq8HDx7MI488wvPPP89NN90UNYFZtmwZH374IWlpafnWD4RQQjdt2jTmz59PVlYWDzzwQGRf\nUlISXbp0Yf78+ZGWw5ImgHXr1i3UKrV+/XoAevbseVzLQBT06KOP8uqrr3L33Xdz8cUXR36pT01N\nJSsrix49euRrwYqmVatW5OTksHLlykJdPvO2PhUld6KSL774otC+L7/8kpycnKitTZXR9u3b2bhx\nY6FWwOXLl9OgQYPIxDOxXP9TTz2VnJwcsrKyCrWiFmx9zb2OX375ZaHzLFu2LObPczS59+qKFSsi\nXXRzFXVvNG7cmJEjRzJy5EiOHDkSmQznpz/9adQ1G8uCxgCWl9zWv2rVoZda/0RERKT0ZGRksGfP\nHrp27RopO/PMMxk6dCgff/wxDz/8cKFjtm7dGllP8LHHHiu0Pzehyz02bwtg7ut3332XzMxMUlJS\nAm+xiaZu3brceeedfPnll8ycOTNS/qMf/YicnBzuvffeqMdt2rQpsn3FFVcAMH78+Hx1/vrXvx61\n+ydAgwYNOO+885g3bx5Lly6NlLs7jz76KBBqrTxRFLyXXn/9dVasWMHAgQMjZbFc/wEDBgAUWiph\n7ty5hZKsrl270qxZM6ZOnZpv/OjOnTt55plnju0DFaNfv37Ex8czadKkfF2NN27cWGhm171797J3\n7958ZXFxcZFxoVu3bi31+IqiFsDy8u5vwHNCY/+SSzZlsIiIiEhBixYtYsaMGQAcOHCAL774gsmT\nJ1OjRo1C6/09++yzbNq0iUceeYSMjAwGDx5McnIyWVlZkV+Sn3rqKfr161fofXITwPfee4/WrVsX\nGguWnp7O+PHj2bhxI0OGDCn3LpIlNXr0aMaPH88vfvELhg4dSlxcXGTpgYkTJ7Jo0SIuv/xyUlJS\nWL9+PR999BGrVq2KjCfr378/l1xyCVOmTCE7OzuyDMTkyZPp3LkzS5YsOWoMf/jDH0hPT6dXr16R\nZSDeeOMN3nrrLa677royGwP2/vvvRx0DCXD99deX+r9ZSkoKc+bM4bvvvqNPnz6RZSAaNmyY748Q\nsVz/Sy65hCuuuILp06ezdetWLr30UlavXs2zzz5Lx44d8yXVcXFxjB8/nmuuuYZzzjmHm2++merV\nq/PCCy9Qr149vv7661L9vPXq1eOhhx7ivvvuo2fPngwbNoy9e/cyefJkTjvtNBYuXBi5xllZWaSn\npzNo0CA6duxI3bp1Wb58OZMmTaJ169b06tWrVGMrVkmmCq3oXxV+GYjNWe4P13H/ebL71jVBRyMi\nInLCKMnU6CeKaMtAVKtWzevXr++DBg3yf//731GPO3z4sL/wwguenp7udevW9Ro1anizZs182LBh\nR122oU2bNg74jTfeWGjfli1b3Mwc8AkTJhzXZyutZSDyTv2f19ixYx3wadOm5St/8cUX/fzzz/fa\ntWt7fHy8t2zZ0gcNGuSzZs3KV2/37t0+evRob9CggSckJPg555zj77zzjg8ZMsRr1aoVNZaCFi9e\n7AMGDPC6det6zZo1vX379v7444/74cOHS3S8u0ddDiGaoy0DAfihQ4fcPfpSCLlatmzp6enphcof\neuihQv9e6enp3rJlS1+9erVfeeWVXrt2bT/llFP8yiuv9JUrV0aNs6TXf+/evX733Xd7w4YNPSEh\nwc8++2x/6623irxWs2fP9jPPPNNr1qzpzZo185/97Gf+9ttvx7QMRLTrUdT7TZw40VNTU71mzZre\ntm1bnzBhgj/55JMO+IIFC9
zdPTs72//3f//XzzzzTE9KSvKEhARv06aNjx492r/77ruo1yev0lwG\nwryMBpKWp27duvnChQuDDqNoc0aFun+m/QiunBB0NCIiIieM5cuXl2gqfpGy0KlTJw4dOhR1zJlU\nbXfccQcTJ05kw4YNUWdGjVVJnnVm9om7dzvauTQGsKxlr4TP/6SxfyIiIiKV1L59+wqVvfnmmyxd\nujRq91mpOqJ1sd2wYQMvvvgiHTt2LJXkr7RpDGBZyx37d9ZwqNvy6PVFREREpEJ55JFH+PTTT+nb\nty9JSUksXrw4Mq5szJgxQYcnAcrMzOSee+5h8ODBNGvWjLVr1zJlyhR2794ddXKlikAJYFnanAVL\nX4NqNbTun4iIiEgl1atXLz744APGjRvHjh07SE5OZsiQIfziF7+gWbNmQYcnAWrbti1t2rRhypQp\nbNmyhYSEBLp168a9995baLmTiiLQBNDM1gK7gCPAYXfvZmYPAzcDm8PV7nP3vwYT4XF69/FQ61/a\nDVCnRdDRiIiIiMgx6N+/P/379w86DKmA2rZty9y5c4MOIyYVoQWwr7tnFygb7+6/DSSa0rJ5BSyd\nHWr96/XToKMRERERERHRJDBl5t3HAYe04VCn+VGri4iIiIiIlLWgE0AH3jazT8xsVJ7y/zGzJWb2\ngpnVjXagmY0ys4VmtnDz5s3RqgQnexUsnRNq/Tv/7qCjEREROaGdCEtaiYgUpbSfcUEngOe7exrw\nA+B2M+sNTALaAF2ADcAT0Q5098nu3s3du9WvX7/cAi6R5FPhhy/BhQ+o9U9ERKQMxcXFcejQoaDD\nEBEpM4cOHSIuLq7UzhfoGEB3/zb8/Xszex04x93fy91vZlOAN4KK75hVqwanXxF0FCIiIie82rVr\ns3PnTlJSUoIORUSkTOzcuZPatWuX2vkCawE0s5PNrHbuNnAxsNTMGuepNghYGkR8IiIiUvElJyez\nbds2srOzOXjwoLqDisgJwd05ePAg2dnZbNu2jeTk5FI7d5AtgA2B180sN45X3P3vZvaSmXUhND5w\nLXBLcCGKiIhIRRYfH0+LFi3YunUra9eu5ciRI0GHJCJSKuLi4qhduzYtWrQgPj6+1M4bWALo7l8B\nZ0YpHx5AOCIiIlJJxcfH07hxYxo3bnz0yiIiVVzQk8CIiIiIiIhIOVECKCIiIiIiUkUoARQRERER\nEakilACKiIiIiIhUEUoARUREREREqgglgCIiIiIiIlWEEkAREREREZEqwtw96BiOm5ltBtYFHYdU\nOSlAdtBBSJWl+0+CpntQgqT7T4JUUe+/lu5e/2iVTogEUCQIZrbQ3bsFHYdUTbr/JGi6ByVIuv8k\nSJX9/lMXUBERERERkSpCCaCIiIiIiEgVoQRQ5NhNDjoAqdJ0/0nQdA9KkHT/SZAq9f2nMYAiIiIi\nIiJVhFoARUREREREqgglgCIiIiIiIlWEEkCRMDNrbmbzzWyZmX1hZqPD5clmlmFmK8Pf64bLzcye\nNLNVZrbEzNLynOuGcP2VZnZDUJ9JKh8zizOzT83sjfDr1ma2IHyfvWpmNcPl8eHXq8L7W+U5x73h\n8hVmdkkwn0QqIzOrY2avmdmXZrbczM7VM1DKi5ndFf75u9TMZppZgp6BUlbM7AUz+97MluYpK7Xn\nnZl1NbPPw8c8aWZWvp+waEoARf7rMPBTd+8A9ABuN7MOwFjgHXdPBd4Jvwb4AZAa/hoFTILQwwN4\nCOgOnAM8lPsAESmB0cDyPK8fB8a7e1tgG3BTuPwmYFu4fHy4HuF79lrgDOBS4Gkziyun2KXy+wPw\nd3dvD5xJ6F7UM1DKnJk1Be4Eurl7RyCO0LNMz0ApK9MI3SN5lebzbhJwc57jCr5XYJQAioS5+wZ3\nXxTe3kXoF5+mwABgerjadGBgeHsA8KKHfAzUMbPGwCVAhrtvdfdtQAYV6D+9VFxm1gy4DHgu
/NqA\nC4DXwlUK3n+59+VrwIXh+gOAWe5+wN3XAKsI/VASKZaZJQG9gecB3P2gu29Hz0ApP9WBWmZWHTgJ\n2ICegVJG3P09YGuB4lJ53oX3Jbr7xx6acfPFPOcKnBJAkSjCXUnOAhYADd19Q3jXRqBheLsp8E2e\nw9aHy4oqFzma3wP/B+SEX9cDtrv74fDrvPdS5D4L798Rrq/7T45Va2AzMDXcDfk5MzsZPQOlHLj7\nt8Bvga8JJX47gE/QM1DKV2k975qGtwuWVwhKAEUKMLNTgNnA/7r7zrz7wn/F0dopUurM7HLge3f/\nJOhYpMqqDqQBk9z9LGAP/+3+BOgZKGUn3G1uAKE/RDQBTkYtxxKgE/l5pwRQJA8zq0Eo+XvZ3eeE\nizeFm/IJf/8+XP4t0DzP4c3CZUWVixSnJ3Clma0FZhHq9vQHQt1Mqofr5L2XIvdZeH8SsAXdf3Ls\n1gPr3X1B+PVrhBJCPQOlPFwErHH3ze5+CJhD6LmoZ6CUp9J63n0b3i5YXiEoARQJC48deB5Y7u6/\ny7PrL0DurE43AH/OU/6j8MxQPYAd4W4DbwEXm1nd8F80Lw6XiRTJ3e9192bu3orQBAb/dPfrgfnA\nVeFqBe+/3PvyqnB9D5dfG54hrzWhgef/LqePIZWYu28EvjGzduGiC4Fl6Bko5eNroIeZnRT+eZx7\n/+kZKOWpVJ534X07zaxH+H7+UZ5zBa760auIVBk9geHA52a2OFx2H/AY8EczuwlYB1wT3vdXoD+h\nAeZ7gREA7r7VzH4B/Cdc7xF3LzjIWKSkxgCzzOyXwKeEJ+gIf3/JzFYRGsR+LYC7f2FmfyT0i9Nh\n4HZ3P1L+YUsldQfwcniq/a8IPdeqoWeglDF3X2BmrwGLCD27PgUmA2+iZ6CUATObCfQBUsxsPaHZ\nPEvzd76fEJpptBbwt/BXhWChP5aIiIiIiIjIiU5dQEVERERERKoIJYAiIiIiIiJVhBJAERERERGR\nKkIJoIiIiIiISBWhBFBERERERKSKUAIoIiJSzsysj5m5md0YdCwiIlK1KAEUEZFKJ08C9f/Cr+uY\n2cNm1ifg0CLMrEs4plZBxyIiIpJLC8GLiMiJoA6hRXwBMgOMI68uhGLKBNYW2PceocWBD5VvSCIi\nUtWpBVBEROQozKx2aZ7P3XPcfb+7HynN84qIiByNEkAREanUwt0+14RfPhTuGupmtrZAvR+a2b/M\nbJeZ7TWzBWZ2VZTzuZlNM7MLw/V3A/PC+5qY2RNmttjMtpnZfjNbZmZjzCwuzzkeBqaGX87PE9O0\n3JijjQE0s5PN7FEzW21mB8xso5m9aGYtC37m3OPNbISZfRGuv87M/u/Yr6aIiJzo1AVUREQqu+XA\nXcB44HVgTrh8d24FM/slcD/wd+ABIAcYBPzJzP7H3Z8qcM5uwBBgCjA9T3lnYHD4fVYDNYBLgceA\nU4FbwvXmAI2BUcCvwzESPiYqM6sBvAX0BF4DngBSgduAi82sm7uvL3DYrUBD4HlgOzAMeNzM1rv7\nK0W9l4iIVF3m7kHHICIiEpNwq9984B53/214opU1wM/d/eECddOAT4BH3f2+AvvmAhcATd19V7gs\n9wdjP3f/R4H6tYD9XuCHp5m9BFwHNHP3DeGyGwm1AvZ198wi4h/h7tPCZTcDk4Fx7v5/eepeBrwB\nzHD34QWO3wCc7u47wuUnAeuAVe5+btFXUEREqip1ARURkRPd9YAD080sJe8X8BegNlAwWfqsYPIH\n4O77cpM/M6tpZsnh87xF6Gdqt+OIcxChlslHC7znm8BiYICZFfy5PTU3+QvX3Qt8TKjlUEREpBB1\nARURkRPd6YABXxZTp2GB11nRKplZdWAs8COgbfi8edU9xhgBWgPfufu2KPu+IDSraArwfZ7yr6LU\n3QLUO444RETkBKYEUERETnRGqAXwB0BRs25+UeD13iLq
/Q64A3gV+BWhZOwQkAY8Tvn3rNEsoiIi\nEhMlgCIiciIobkD7SkITtXzt7suLqVcSw4H33P3avIVm1jbGmKL5CrjUzOq4+/YC+zoAO4HsGM8p\nIiKSj8YAiojIiSB3xs/kKPteCn//dd6lGnKZWcHun8U5QoFun2Z2MqFZSGOJKZq5hH4ujy1w/h8A\nZwF/cfecGGIVEREpRC2AIiJS6bn7FjNbBVxrZquBTcAed5/n7v8Jr8v3MLDYzP4EfEdomYauQH+g\nZgnf6jXgFjN7FfgHobGDPyY07q6g/xCa1OV+M6sL7AHWuPuCIs49DbgBGBOe1fQ9QuMMfxL+PPcV\ncZyIiEiJKQEUEZETxfWE1gL8NZC7HMI8AHf/uZktBO4E/hc4mdD4vaXhspK6G9gFXAMMAL4htHTD\nfwglhBHu/rWZ/RgYA0witGbgdCBqAujuh8zsEuBnwA8JrTe4HfgT8DN3/yaGOEVERKLSOoAiIiIi\nIiJVhMYAioiIiIiIVBFKAEVERERERKoIJYAiIiIiIiJVhBJAERERERGRKkIJoIiIiIiISBWhBFBE\nRERERKSKUAIoIiIiIiJSRSgBFBERERERqSKUAIqIiIiIiFQR/x9G2NFZrjqMygAAAABJRU5ErkJg\ngg==\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x16cc8714eb8>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Test accuracy (%) logged every 500 iterations for the two models\n",
    "naive_test_accuracy = [68.0, 68.0, 72.0, 76.0, 75.0, 73.0, 76.0, 78.0, 81.0, 80.0, 80.0, 81.0, 82.0, 81.0, 80.0, 79.0, 81.0, 82.0, 80.0, 83.0]\n",
    "reg_test_accuracy = [55.0, 65.0, 71.0, 72.0, 75.0, 78.0, 80.0, 81.0, 84.0, 84.0, 83.0, 84.0, 83.0, 85.0, 85.0, 86.0, 86.0, 83.0, 84.0, 85.0]\n",
    "\n",
    "# Shared x-axis: iteration counts 500, 1000, ..., 10000\n",
    "steps = np.arange(500, 10001, 500)\n",
    "\n",
    "fig = pylab.figure(figsize=(15, 5))\n",
    "pylab.plot(steps, naive_test_accuracy, linestyle='--', linewidth=2.0, label='BOW')\n",
    "pylab.plot(steps, reg_test_accuracy, linewidth=2.0, label='BOW + Region Embeddings')\n",
    "pylab.legend(fontsize=18)\n",
    "pylab.xlabel('Iteration', fontsize=18)\n",
    "pylab.ylabel('Test Accuracy', fontsize=18)\n",
    "pylab.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
