{
 "nbformat": 4,
 "nbformat_minor": 2,
 "metadata": {
  "language_info": {
   "name": "python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "version": "3.6.6-final"
  },
  "orig_nbformat": 2,
  "file_extension": ".py",
  "mimetype": "text/x-python",
  "name": "python",
  "npconvert_exporter": "python",
  "pygments_lexer": "ipython3",
  "version": 3,
  "kernelspec": {
   "name": "python36664bitea6884f10f474b21a2a2f022451e0d09",
   "display_name": "Python 3.6.6 64-bit"
  }
 },
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm\n",
    "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
    "from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Bidirectional\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.preprocessing.text import Tokenizer\n",
    "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.datasets import fetch_20newsgroups\n",
    "import numpy as np\n",
    "\n",
    "from glob import glob\n",
    "import random\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_embedding_vectors(word_index, dim=100):\n",
    "    embedding_matrix = np.zeros((len(word_index) + 1, dim))\n",
    "    with open(f\"data/glove.6B.{dim}d.txt\", encoding=\"utf8\") as f:\n",
    "        for line in tqdm(f, \"Reading GloVe\"):\n",
    "            values = line.split()\n",
    "            # get the word as the first word in the line\n",
    "            word = values[0]\n",
    "            if word in word_index:\n",
    "                idx = word_index[word]\n",
    "                # get the vectors as the remaining values in the line\n",
    "                embedding_matrix[idx] = np.array(values[1:], dtype=\"float32\")\n",
    "\n",
    "    return embedding_matrix\n",
    "\n",
    "\n",
    "def create_model(word_index, units=128, n_layers=2, cell=LSTM, bidirectional=False,\n",
    "                embedding_size=100, sequence_length=100, dropout=0.3, \n",
    "                loss=\"categorical_crossentropy\", optimizer=\"adam\", \n",
    "                output_length=2):\n",
    "    \"\"\"\n",
    "    Constructs a RNN model given its parameters\n",
    "    \"\"\"\n",
    "    embedding_matrix = get_embedding_vectors(word_index, embedding_size)\n",
    "    model = Sequential()\n",
    "    # add the embedding layer\n",
    "    model.add(Embedding(len(word_index) + 1,\n",
    "              embedding_size,\n",
    "              weights=[embedding_matrix],\n",
    "              trainable=False,\n",
    "              input_length=sequence_length))\n",
    "\n",
    "    for i in range(n_layers):\n",
    "        if i == n_layers - 1:\n",
    "            # last layer\n",
    "            if bidirectional:\n",
    "                model.add(Bidirectional(cell(units, return_sequences=False)))\n",
    "            else:\n",
    "                model.add(cell(units, return_sequences=False))\n",
    "        else:\n",
    "            # first layer or hidden layers\n",
    "            if bidirectional:\n",
    "                model.add(Bidirectional(cell(units, return_sequences=True)))\n",
    "            else:\n",
    "                model.add(cell(units, return_sequences=True))\n",
    "        model.add(Dropout(dropout))\n",
    "\n",
    "    model.add(Dense(output_length, activation=\"softmax\"))\n",
    "    # compile the model\n",
    "    model.compile(optimizer=optimizer, loss=loss, metrics=[\"accuracy\"])\n",
    "    return model\n",
    "\n",
    "\n",
    "\n",
    "def save_imdb_data():\n",
    "\n",
    "    pos_training_files = glob(\"data/aclImdb/train/pos/*.txt\")\n",
    "    neg_training_files = glob(\"data/aclImdb/train/neg/*.txt\")\n",
    "    pos_testing_files = glob(\"data/aclImdb/test/pos/*.txt\")\n",
    "    neg_testing_files = glob(\"data/aclImdb/test/neg/*.txt\")\n",
    "\n",
    "    print(\"total pos training files:\", len(pos_training_files))\n",
    "    print(\"total neg training files:\", len(neg_training_files))\n",
    "    print(\"total pos testing files:\", len(pos_testing_files))\n",
    "    print(\"total neg testing files:\", len(neg_testing_files))\n",
    "\n",
    "    # load the data, 0 for negative sentiment, 1 for positive sentiment\n",
    "    data = []\n",
    "    for file in tqdm(pos_training_files, \"Loading positive training data\"):\n",
    "        data.append((open(file).read().strip(), 1))\n",
    "        \n",
    "    for file in tqdm(neg_training_files, \"Loading negative training data\"):\n",
    "        data.append((open(file).read().strip(), 0))\n",
    "\n",
    "    for file in tqdm(pos_testing_files, \"Loading positive testing data\"):\n",
    "        data.append((open(file).read().strip(), 1))\n",
    "\n",
    "    for file in tqdm(neg_testing_files, \"Loading negative testing data\"):\n",
    "        data.append((open(file).read().strip(), 0))\n",
    "\n",
    "    # shuffle the data\n",
    "    random.shuffle(data)\n",
    "    with open(\"data/reviews.txt\", \"w\") as reviews_file:\n",
    "        with open(\"data/labels.txt\", \"w\") as labels_file:\n",
    "            for review, label in tqdm(data, \"Writing data to files\"):\n",
    "                print(review, file=reviews_file)\n",
    "                print(label, file=labels_file)\n",
    "\n",
    "    \n",
    "def load_imdb_data(num_words, sequence_length, test_size=0.25, oov_token=None):\n",
    "    # read reviews\n",
    "    reviews = []\n",
    "    with open(\"data/reviews.txt\") as f:\n",
    "        for review in f:\n",
    "            review = review.strip()\n",
    "            reviews.append(review)\n",
    "\n",
    "    labels = []\n",
    "    with open(\"data/labels.txt\") as f:\n",
    "        for label in f:\n",
    "            label = label.strip()\n",
    "            labels.append(label)\n",
    "\n",
    "\n",
    "    tokenizer = Tokenizer(num_words=num_words, oov_token=oov_token)\n",
    "    tokenizer.fit_on_texts(reviews)\n",
    "    X = tokenizer.texts_to_sequences(reviews)\n",
    "    \n",
    "    X, y = np.array(X), np.array(labels)\n",
    "\n",
    "    # pad sequences with 0's\n",
    "    X = pad_sequences(X, maxlen=sequence_length)\n",
    "\n",
    "    # convert labels to one-hot encoded\n",
    "    y = to_categorical(y)\n",
    "\n",
    "    # split data to training and testing sets\n",
    "    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1)\n",
    "\n",
    "    data = {}\n",
    "\n",
    "    data[\"X_train\"] = X_train\n",
    "    data[\"X_test\"]= X_test\n",
    "    data[\"y_train\"] = y_train\n",
    "    data[\"y_test\"] = y_test\n",
    "    data[\"tokenizer\"] = tokenizer\n",
    "    data[\"int2label\"] =  {0: \"negative\", 1: \"positive\"}\n",
    "    data[\"label2int\"] = {\"negative\": 0, \"positive\": 1}\n",
    "    \n",
    "    return data\n",
    "\n",
    "\n",
    "def load_20_newsgroup_data(num_words, sequence_length, test_size=0.25, oov_token=None):\n",
    "    # load the 20 news groups dataset\n",
    "    # shuffling the data & removing each document's header, signature blocks and quotation blocks\n",
    "    dataset = fetch_20newsgroups(subset=\"all\", shuffle=True, remove=(\"headers\", \"footers\", \"quotes\"))\n",
    "    documents = dataset.data\n",
    "    labels = dataset.target\n",
    "\n",
    "    tokenizer = Tokenizer(num_words=num_words, oov_token=oov_token)\n",
    "    tokenizer.fit_on_texts(documents)\n",
    "    X = tokenizer.texts_to_sequences(documents)\n",
    "    \n",
    "    X, y = np.array(X), np.array(labels)\n",
    "\n",
    "    # pad sequences with 0's\n",
    "    X = pad_sequences(X, maxlen=sequence_length)\n",
    "\n",
    "    # convert labels to one-hot encoded\n",
    "    y = to_categorical(y)\n",
    "\n",
    "    # split data to training and testing sets\n",
    "    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1)\n",
    "\n",
    "    data = {}\n",
    "\n",
    "    data[\"X_train\"] = X_train\n",
    "    data[\"X_test\"]= X_test\n",
    "    data[\"y_train\"] = y_train\n",
    "    data[\"y_test\"] = y_test\n",
    "    data[\"tokenizer\"] = tokenizer\n",
    "\n",
    "    data[\"int2label\"] = { i: label for i, label in enumerate(dataset.target_names) }\n",
    "    data[\"label2int\"] = { label: i for i, label in enumerate(dataset.target_names) }\n",
    "    \n",
    "    return data\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# max number of words in each sentence\n",
     "SEQUENCE_LENGTH = 300\n",
     "# N-Dimensional GloVe embedding vectors; must match an available\n",
     "# data/glove.6B.{dim}d.txt file (300 here; 100 and 200 also exist)\n",
     "EMBEDDING_SIZE = 300\n",
     "# number of words to use, discarding the rest\n",
     "N_WORDS = 10000\n",
     "# out of vocabulary token (None = unknown words are simply dropped)\n",
     "OOV_TOKEN = None\n",
     "# 30% testing set, 70% training set\n",
     "TEST_SIZE = 0.3\n",
     "# number of CELL layers\n",
     "N_LAYERS = 1\n",
     "# the RNN cell to use, LSTM in this case\n",
     "RNN_CELL = LSTM\n",
     "# whether it's a bidirectional RNN\n",
     "IS_BIDIRECTIONAL = False\n",
     "# number of units (RNN_CELL nodes) in each layer\n",
     "UNITS = 128\n",
     "# dropout rate\n",
     "DROPOUT = 0.4\n",
     "### Training parameters\n",
     "LOSS = \"categorical_crossentropy\"\n",
     "OPTIMIZER = \"adam\"\n",
     "BATCH_SIZE = 64\n",
     "EPOCHS = 6\n",
    "\n",
    "def get_model_name(dataset_name):\n",
    "    # construct the unique model name\n",
    "    model_name = f\"{dataset_name}-{RNN_CELL.__name__}-seq-{SEQUENCE_LENGTH}-em-{EMBEDDING_SIZE}-w-{N_WORDS}-layers-{N_LAYERS}-units-{UNITS}-opt-{OPTIMIZER}-BS-{BATCH_SIZE}-d-{DROPOUT}\"\n",
    "    if IS_BIDIRECTIONAL:\n",
    "        # add 'bid' str if bidirectional\n",
    "        model_name = \"bid-\" + model_name\n",
    "    if OOV_TOKEN:\n",
    "        # add 'oov' str if OOV token is specified\n",
    "        model_name += \"-oov\"\n",
    "    return model_name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": "Reading GloVe: 400000it [00:17, 23047.55it/s]\nModel: \"sequential\"\n_________________________________________________________________\nLayer (type)                 Output Shape              Param #   \n=================================================================\nembedding (Embedding)        (None, 300, 300)          37267200  \n_________________________________________________________________\nlstm (LSTM)                  (None, 128)               219648    \n_________________________________________________________________\ndropout (Dropout)            (None, 128)               0         \n_________________________________________________________________\ndense (Dense)                (None, 2)                 258       \n=================================================================\nTotal params: 37,487,106\nTrainable params: 219,906\nNon-trainable params: 37,267,200\n_________________________________________________________________\nTrain on 35000 samples, validate on 15000 samples\nEpoch 1/6\n35000/35000 [==============================] - 186s 5ms/sample - loss: 0.4359 - accuracy: 0.7919 - val_loss: 0.2912 - val_accuracy: 0.8788\nEpoch 2/6\n35000/35000 [==============================] - 179s 5ms/sample - loss: 0.2857 - accuracy: 0.8820 - val_loss: 0.2608 - val_accuracy: 0.8919\nEpoch 3/6\n35000/35000 [==============================] - 175s 5ms/sample - loss: 0.2501 - accuracy: 0.8985 - val_loss: 0.2472 - val_accuracy: 0.8977\nEpoch 4/6\n35000/35000 [==============================] - 174s 5ms/sample - loss: 0.2184 - accuracy: 0.9129 - val_loss: 0.2525 - val_accuracy: 0.8997\nEpoch 5/6\n35000/35000 [==============================] - 185s 5ms/sample - loss: 0.1918 - accuracy: 0.9246 - val_loss: 0.2576 - val_accuracy: 0.9035\nEpoch 6/6\n35000/35000 [==============================] - 188s 5ms/sample - loss: 0.1598 - accuracy: 0.9391 - val_loss: 0.2494 - val_accuracy: 0.9004\n"
    }
   ],
   "source": [
    "from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint\n",
    "\n",
    "import os\n",
    "import pickle\n",
    "\n",
    "# create these folders if they does not exist\n",
    "if not os.path.isdir(\"results\"):\n",
    "    os.mkdir(\"results\")\n",
    "\n",
    "if not os.path.isdir(\"logs\"):\n",
    "    os.mkdir(\"logs\")\n",
    "\n",
    "if not os.path.isdir(\"data\"):\n",
    "    os.mkdir(\"data\")\n",
    "\n",
    "# load the data\n",
    "data = load_imdb_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)\n",
    "# data = load_20_newsgroup_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)\n",
    "\n",
    "# save the tokenizer object to use later in testing\n",
    "# pickle.dump(data[\"tokenizer\"], open(f\"results/{model_name}_tokenizer.pickle\", \"wb\"))\n",
    "\n",
    "model = create_model(data[\"tokenizer\"].word_index, units=UNITS, n_layers=N_LAYERS, \n",
    "                    cell=RNN_CELL, bidirectional=IS_BIDIRECTIONAL, embedding_size=EMBEDDING_SIZE, \n",
    "                    sequence_length=SEQUENCE_LENGTH, dropout=DROPOUT, \n",
    "                    loss=LOSS, optimizer=OPTIMIZER, output_length=data[\"y_train\"][0].shape[0])\n",
    "\n",
    "# checkpointer = ModelCheckpoint(os.path.join(\"results\", model_name), \n",
    "#                                 save_weights_only=True, save_best_only=True, \n",
    "#                                 verbose=1)\n",
    "model.summary()\n",
    "\n",
    "tensorboard = TensorBoard(log_dir=os.path.join(\"logs\", model_name))\n",
    "\n",
    "history = model.fit(data[\"X_train\"], data[\"y_train\"],\n",
    "                    batch_size=BATCH_SIZE,\n",
    "                    epochs=EPOCHS,\n",
    "                    validation_data=(data[\"X_test\"], data[\"y_test\"]),\n",
    "                    # callbacks=[checkpointer, tensorboard],\n",
    "                    callbacks=[tensorboard],\n",
    "                    verbose=1)\n",
    "\n",
    "\n",
    "model.save(os.path.join(\"results\", model_name) + \".h5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_predictions(text):\n",
    "    sequence = data[\"tokenizer\"].texts_to_sequences([text])\n",
    "    # pad the sequences\n",
    "    sequence = pad_sequences(sequence, maxlen=SEQUENCE_LENGTH)\n",
    "    # get the prediction\n",
    "    prediction = model.predict(sequence)[0]\n",
    "    return prediction, data[\"int2label\"][np.argmax(prediction)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": "==================================================\nOutput vector: [0.38528103 0.61471903]\nPrediction: positive\n"
    }
   ],
   "source": [
    "text = \"Not very good, but pretty good try.\"\n",
    "output_vector, prediction = get_predictions(text)\n",
    "print(\"=\"*50)\n",
    "print(\"Output vector:\", output_vector)\n",
    "print(\"Prediction:\", prediction)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}