{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Hindi Model 5E CL CSV Scheduled Sampling.ipynb",
      "provenance": [],
      "collapsed_sections": [
        "CI_dHFzBG33S",
        "nPshHzWkGy9G",
        "Ifr6xPCCH-6r",
        "oeWwMVL0Ic2r",
        "oapGiq-KLiFL",
        "XpqwdCAVK5uE",
        "Fxvwy-T5KvIz",
        "y4EKS0e5IsEB",
        "f5lP9ZHFMLlD",
        "Vl42_ExqL17l",
        "HArp8MJDMePU",
        "HyX9o0teI4fb",
        "0CT_2nWdJNnD",
        "-BUkkzzj20LP",
        "e7ViuuP3IIeL"
      ]
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "BrVBK1BuGuXT",
        "colab_type": "text"
      },
      "source": [
        "https://github.com/yaserkl/RLSeq2Seq/blob/master/src/run_summarization.py\n",
        "\n",
        "\n",
        "\n",
        "```\n",
        "@article{keneshloo2018deep,\n",
        " title={Deep Reinforcement Learning For Sequence to Sequence Models},\n",
        " author={Keneshloo, Yaser and Shi, Tian and Ramakrishnan, Naren and Reddy, Chandan K.},\n",
        " journal={arXiv preprint arXiv:1805.09461},\n",
        " year={2018}\n",
        "}\n",
        "```\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Lx4W7ksuGwUT",
        "colab_type": "text"
      },
      "source": [
        "## Helpers (Google Drive)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "scSsHjmDGzrK",
        "colab_type": "text"
      },
      "source": [
        "### Google Drive"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1bpp_81k95QL",
        "colab_type": "code",
        "outputId": "c931b3fd-4995-47bd-b98a-156c35152ace",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "# Mount Google Drive at /content/drive (interactive OAuth prompt on first run).\n",
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n",
            "\n",
            "Enter your authorization code:\n",
            "··········\n",
            "Mounted at /content/drive\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kmLl5jIJb_hH",
        "colab_type": "code",
        "outputId": "369c7d0d-e9d6-4cf4-b97a-a3d2ae84135d",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        }
      },
      "source": [
        "# Sanity-check the working directory (should list drive/ after mounting).\n",
        "!ls"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "drive  sample_data\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "gR-ZiNFACW46",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "default_path = \"drive/My Drive/Hindi_News/\"\n",
        "\n",
        "#-------------------------------save/load--------------------------------------#\n",
        "pickle_path = default_path + \"pickles/\"\n",
        "log_file = default_path + \"logs.txt\"\n",
        "csv_file = default_path + \"logs.csv\"\n",
        "\n",
        "tensorflow_log_file = default_path + 'tensorflow3.log'\n",
        "\n",
        "# Append-mode handles used for run-time logging.\n",
        "log_file_handler = open(log_file,\"a\")\n",
        "csv_file_handler = open(csv_file,\"a\")\n",
        "\n",
        "# NOTE(review): these open the same two files a second time in \"w+\" mode,\n",
        "# which truncates them (the append handles above then write to the new end).\n",
        "# file1/file2 are never closed here -- presumably only the truncation side\n",
        "# effect is wanted; confirm no later cell writes through them.\n",
        "file1 = open(log_file , \"w+\")\n",
        "file2 = open(csv_file , \"w+\")\n",
        "\n",
        "import pickle\n",
        "\n",
        "def save(obj , filename):\n",
        "  \"\"\"Pickle `obj` to `filename` using the highest protocol.\"\"\"\n",
        "  print(\"saving {} ..\".format(filename))\n",
        "  with open(filename, 'wb') as handle:\n",
        "      pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
        "      \n",
        "def load(filename):\n",
        "  \"\"\"Load and return a pickled object from `filename`.\"\"\"\n",
        "  print(\"loading {} ..\".format(filename))\n",
        "  with open(filename, 'rb') as handle:\n",
        "    return pickle.load(handle)\n",
        "#-----------------------------------------------------------------------------------#  "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GqDmhQeuS81V",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Dump the accumulated TensorFlow log from Drive for inspection.\n",
        "with open(tensorflow_log_file , \"r\") as reader:\n",
        "  print(reader.read())"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VOhw4MViOlgb",
        "colab_type": "text"
      },
      "source": [
        "### Requirements"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OSb-2ufSOrAC",
        "colab_type": "text"
      },
      "source": [
        "https://github.com/yaserkl/RLSeq2Seq/blob/master/python_requirements.txt"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9qyq4XU7OnrC",
        "colab_type": "code",
        "outputId": "2dac8511-8b11-40bd-820b-6f66f50ac00d",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 578
        }
      },
      "source": [
        "# Dependencies for the RLSeq2Seq summarization code.\n",
        "# NOTE(review): versions are unpinned, so Colab's preinstalled versions are used.\n",
        "!pip install PyYAML\n",
        "!pip install spacy\n",
        "!pip install scikit-learn\n",
        "!pip install nltk\n",
        "!pip install pyrouge"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Requirement already satisfied: PyYAML in /usr/local/lib/python3.6/dist-packages (3.13)\n",
            "Requirement already satisfied: spacy in /usr/local/lib/python3.6/dist-packages (2.1.9)\n",
            "Requirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (2.21.0)\n",
            "Requirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (1.17.4)\n",
            "Requirement already satisfied: plac<1.0.0,>=0.9.6 in /usr/local/lib/python3.6/dist-packages (from spacy) (0.9.6)\n",
            "Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from spacy) (2.0.3)\n",
            "Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (1.0.2)\n",
            "Requirement already satisfied: srsly<1.1.0,>=0.0.6 in /usr/local/lib/python3.6/dist-packages (from spacy) (0.2.0)\n",
            "Requirement already satisfied: wasabi<1.1.0,>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from spacy) (0.4.2)\n",
            "Requirement already satisfied: blis<0.3.0,>=0.2.2 in /usr/local/lib/python3.6/dist-packages (from spacy) (0.2.4)\n",
            "Requirement already satisfied: thinc<7.1.0,>=7.0.8 in /usr/local/lib/python3.6/dist-packages (from spacy) (7.0.8)\n",
            "Requirement already satisfied: preshed<2.1.0,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from spacy) (2.0.1)\n",
            "Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (1.24.3)\n",
            "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (3.0.4)\n",
            "Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (2.8)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.13.0->spacy) (2019.11.28)\n",
            "Requirement already satisfied: tqdm<5.0.0,>=4.10.0 in /usr/local/lib/python3.6/dist-packages (from thinc<7.1.0,>=7.0.8->spacy) (4.28.1)\n",
            "Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (0.21.3)\n",
            "Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn) (0.14.0)\n",
            "Requirement already satisfied: scipy>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn) (1.3.3)\n",
            "Requirement already satisfied: numpy>=1.11.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn) (1.17.4)\n",
            "Requirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (3.2.5)\n",
            "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from nltk) (1.12.0)\n",
            "Collecting pyrouge\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/11/85/e522dd6b36880ca19dcf7f262b22365748f56edc6f455e7b6a37d0382c32/pyrouge-0.1.3.tar.gz (60kB)\n",
            "\u001b[K     |████████████████████████████████| 61kB 1.9MB/s \n",
            "\u001b[?25hBuilding wheels for collected packages: pyrouge\n",
            "  Building wheel for pyrouge (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for pyrouge: filename=pyrouge-0.1.3-cp36-none-any.whl size=191613 sha256=e7cd4ead6e18a0a94f3bba55796258de5207f5a5dc6953199bfda226eddae89b\n",
            "  Stored in directory: /root/.cache/pip/wheels/75/d3/0c/e5b04e15b6b87c42e980de3931d2686e14d36e045058983599\n",
            "Successfully built pyrouge\n",
            "Installing collected packages: pyrouge\n",
            "Successfully installed pyrouge-0.1.3\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "rgCuTT0PZ8wq",
        "colab_type": "code",
        "outputId": "1b1ebcdd-bf03-4cce-f2a8-82d59a92a004",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 68
        }
      },
      "source": [
        "import nltk\n",
        "# Download the Punkt sentence-tokenizer models (required by nltk tokenizers).\n",
        "nltk.download('punkt')"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "[nltk_data] Downloading package punkt to /root/nltk_data...\n",
            "[nltk_data]   Unzipping tokenizers/punkt.zip.\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "True"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 4
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "CI_dHFzBG33S",
        "colab_type": "text"
      },
      "source": [
        "## Model Helpers"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nPshHzWkGy9G",
        "colab_type": "text"
      },
      "source": [
        "### Progress Bar"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2HfUeKZkG1EC",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "from __future__ import print_function\n",
        "\n",
        "\n",
        "import json\n",
        "import os\n",
        "import pandas\n",
        "import io\n",
        "import sys\n",
        "import re\n",
        "\n",
        "\n",
        "class ProgressBar(object):\n",
        "    \"\"\"Minimal text progress bar redrawn in place on an output stream.\n",
        "\n",
        "    Usage: construct with the expected total, bump `current`, call the\n",
        "    instance to redraw, and call `done()` when finished.\n",
        "    \"\"\"\n",
        "    DEFAULT = 'Progress: %(bar)s %(percent)3d%%'\n",
        "    FULL = '%(bar)s %(current)d/%(total)d (%(percent)3d%%) %(remaining)d to go'\n",
        "\n",
        "    def __init__(self, total, width=40, fmt=DEFAULT, symbol='=',\n",
        "                 output=sys.stderr):\n",
        "        assert len(symbol) == 1\n",
        "\n",
        "        self.total = total\n",
        "        self.width = width\n",
        "        self.symbol = symbol\n",
        "        self.output = output\n",
        "        # Widen every %(...)d field to the digit count of `total` so the\n",
        "        # rendered line keeps a constant width while counting up.\n",
        "        self.fmt = re.sub(r'(?P<name>%\\(.+?\\))d',\n",
        "            r'\\g<name>%dd' % len(str(total)), fmt)\n",
        "\n",
        "        self.current = 0\n",
        "\n",
        "    def __call__(self):\n",
        "        # Redraw the bar. Guard total == 0, which previously raised\n",
        "        # ZeroDivisionError (an empty job is treated as 100% complete).\n",
        "        percent = self.current / float(self.total) if self.total else 1.0\n",
        "        size = int(self.width * percent)\n",
        "        remaining = self.total - self.current\n",
        "        bar = '[' + self.symbol * size + ' ' * (self.width - size) + ']'\n",
        "\n",
        "        args = {\n",
        "            'total': self.total,\n",
        "            'bar': bar,\n",
        "            'current': self.current,\n",
        "            'percent': percent * 100,\n",
        "            'remaining': remaining\n",
        "        }\n",
        "        # '\\r' returns the cursor to column 0 so the bar overwrites itself.\n",
        "        print('\\r' + self.fmt % args, file=self.output, end='')\n",
        "\n",
        "    def done(self):\n",
        "        \"\"\"Force the bar to 100% and emit the terminating newline.\"\"\"\n",
        "        self.current = self.total\n",
        "        self()\n",
        "        print('', file=self.output)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Ifr6xPCCH-6r",
        "colab_type": "text"
      },
      "source": [
        "### Vocab"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "T27x4AEeIC67",
        "colab_type": "text"
      },
      "source": [
        "https://github.com/yaserkl/RLSeq2Seq/blob/master/src/data.py"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vuHqMQ0xIDMy",
        "colab_type": "code",
        "outputId": "c0f31866-b46e-4654-bee9-82dff01efb5c",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 0
        }
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This file contains code to read the train/eval/test data from file and process it, and read the vocab data from file and process it\"\"\"\n",
        "\n",
        "import glob\n",
        "import random\n",
        "import struct\n",
        "import csv\n",
        "from tensorflow.core.example import example_pb2\n",
        "import operator\n",
        "import numpy as np\n",
        "np.random.seed(123)\n",
        "\n",
        "# <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids.\n",
        "SENTENCE_START = '<s>'\n",
        "SENTENCE_END = '</s>'\n",
        "\n",
        "PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence\n",
        "UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words\n",
        "START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence\n",
        "STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences\n",
        "\n",
        "# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file.\n",
        "\n",
        "\n",
        "class Vocab(object):\n",
        "  \"\"\"Vocabulary class for mapping between words and ids (integers)\"\"\"\n",
        "\n",
        "  def __init__(self, vocab_file, max_size):\n",
        "    \"\"\"Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.\n",
        "    Args:\n",
        "      vocab_file: path to the vocab file, which is assumed to contain \"<word> <frequency>\" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.\n",
        "      max_size: integer. The maximum size of the resulting Vocabulary.\"\"\"\n",
        "    self._word_to_id = {}\n",
        "    self._id_to_word = {}\n",
        "    self._count = 0 # keeps track of total number of words in the Vocab\n",
        "\n",
        "    # [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n",
        "    # All keys are stored lowercased; word2id() lowercases before lookup.\n",
        "    for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n",
        "      self._word_to_id[w.lower()] = self._count\n",
        "      self._id_to_word[self._count] = w.lower()\n",
        "      self._count += 1\n",
        "\n",
        "    # Read the vocab file and add words up to max_size\n",
        "    with open(vocab_file, 'r') as vocab_f:\n",
        "      for line in vocab_f:\n",
        "        pieces = line.split()\n",
        "        if len(pieces) != 2:\n",
        "          print('Warning: incorrectly formatted line in vocabulary file: %s\\n' % line)\n",
        "          continue\n",
        "        w = pieces[0].lower()\n",
        "        if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n",
        "          raise Exception('<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n",
        "        if w in self._word_to_id:\n",
        "          # Duplicates (e.g. case-folding collisions) are skipped, not fatal.\n",
        "          # (Removed an unreachable `raise` that followed this `continue`.)\n",
        "          print(\"Duplicate:\",w)\n",
        "          continue\n",
        "        self._word_to_id[w] = self._count\n",
        "        self._id_to_word[self._count] = w\n",
        "        self._count += 1\n",
        "        if max_size != 0 and self._count >= max_size:\n",
        "          print(\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (max_size, self._count))\n",
        "          break\n",
        "\n",
        "    #print(\"Finished constructing vocabulary of %i total words. Last word added: %s\" % (self._count, self._id_to_word[self._count-1]))\n",
        "\n",
        "  def word2id(self, word):\n",
        "    \"\"\"Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV.\"\"\"\n",
        "    # Keys are stored lowercased, so normalise before the membership test.\n",
        "    # (The original tested the raw word but looked up word.lower(), sending\n",
        "    # mixed-case in-vocab words to [UNK].)\n",
        "    word = word.lower()\n",
        "    if word not in self._word_to_id:\n",
        "      return self._word_to_id[UNKNOWN_TOKEN.lower()]\n",
        "    return self._word_to_id[word]\n",
        "\n",
        "  def id2word(self, word_id):\n",
        "    \"\"\"Returns the word (string) corresponding to an id (integer).\"\"\"\n",
        "    try:\n",
        "      return self._id_to_word[word_id]\n",
        "    except KeyError:\n",
        "      raise ValueError('Id not found in vocab: %d' % word_id)\n",
        "\n",
        "  def size(self):\n",
        "    \"\"\"Returns the total size of the vocabulary (including the 4 special tokens).\"\"\"\n",
        "    return self._count\n",
        "\n",
        "  def write_metadata(self, fpath):\n",
        "    \"\"\"Writes metadata file for Tensorboard word embedding visualizer as described here:\n",
        "      https://www.tensorflow.org/get_started/embedding_viz\n",
        "    Args:\n",
        "      fpath: place to write the metadata file\n",
        "    \"\"\"\n",
        "    print(\"Writing word embedding metadata file to %s...\" % (fpath))\n",
        "    with open(fpath, \"w\") as f:\n",
        "      fieldnames = ['word']\n",
        "      # csv.DictWriter on Python 3 requires a 1-character str delimiter;\n",
        "      # the previous bytes delimiter (str.encode('utf-8')) raised TypeError.\n",
        "      writer = csv.DictWriter(f, delimiter='\\t', fieldnames=fieldnames)\n",
        "      for i in range(self.size()):\n",
        "        writer.writerow({\"word\": self._id_to_word[i]})\n",
        "\n",
        "  def LoadWordEmbedding(self, w2v_file, word_dim):\n",
        "    \"\"\"Load word vectors from a gensim KeyedVectors save file and build the\n",
        "    embedding matrix for this vocab (via MakeWordEmbedding).\n",
        "    Args:\n",
        "      w2v_file: path to a gensim KeyedVectors save file.\n",
        "      word_dim: dimensionality of each word vector.\n",
        "    \"\"\"\n",
        "    self.wordDict = {}\n",
        "    self.word_dim = word_dim\n",
        "\n",
        "    # Special tokens: [UNK] gets the zero vector; the other specials get\n",
        "    # random uniform vectors in [-1, 1).\n",
        "    self.wordDict[UNKNOWN_TOKEN] = np.zeros(self.word_dim,dtype=np.float32)\n",
        "    self.wordDict[PAD_TOKEN] = np.random.uniform(-1,1,self.word_dim)\n",
        "    self.wordDict[START_DECODING] = np.random.uniform(-1,1,self.word_dim)\n",
        "    self.wordDict[STOP_DECODING] = np.random.uniform(-1,1,self.word_dim)\n",
        "    ##with open(w2v_file) as wf:\n",
        "    ##  for line in wf:\n",
        "    ##    info = line.strip().split()\n",
        "    ##    word = info[0]\n",
        "    ##    coef = np.asarray(info[1:], dtype='float32')\n",
        "    ##    self.wordDict[word] = coef\n",
        "    ##    assert self.word_dim == len(coef)\n",
        "    #for a in embeddings_index.keys():\n",
        "    #  self.wordDict[a] = embeddings_index[a]\n",
        "      \n",
        "      \n",
        "    import gensim\n",
        "    from gensim.models.keyedvectors import KeyedVectors\n",
        "\n",
        "    #--------------------------------------my model-------------------------------------------#\n",
        "    print('Loading my Model ..')\n",
        "    model = KeyedVectors.load(w2v_file, mmap='r')\n",
        "    print('Loading Done el7 !!')\n",
        "    # NOTE(review): the bar total uses model.vocab but the loop iterates\n",
        "    # model.wv.vocab -- whether both attributes exist depends on the gensim\n",
        "    # version; confirm they refer to the same mapping.\n",
        "    progress = ProgressBar(len(model.vocab), fmt=ProgressBar.FULL)\n",
        "\n",
        "    for word in model.wv.vocab:\n",
        "          embedding = np.asarray(model[word], dtype='float32')\n",
        "          self.wordDict[word] = embedding\n",
        "          progress.current += 1\n",
        "          progress()\n",
        "    progress.done()\n",
        "    print('\\n Word embeddings:', len(self.wordDict))\n",
        "    print('\\n')\n",
        "\n",
        "    self.MakeWordEmbedding()\n",
        "\n",
        "  def MakeWordEmbedding(self):\n",
        "    \"\"\"Build self._wordEmbedding (vocab_size x word_dim) from self.wordDict.\"\"\"\n",
        "    # Iterate words ordered by id so row i of the matrix matches id i.\n",
        "    sorted_x = sorted(self._word_to_id.items(), key=operator.itemgetter(1))\n",
        "    self._wordEmbedding = np.zeros((self.size(), self.word_dim),dtype=np.float32) # replace unknown words with UNKNOWN_TOKEN embedding (zero vector)\n",
        "    for word,i in sorted_x:\n",
        "      # NOTE(review): vocab keys are lowercased while wordDict keeps the\n",
        "      # embedding file's original case -- vectors for mixed-case words are\n",
        "      # silently skipped here; confirm this is intended.\n",
        "      if word in self.wordDict:\n",
        "        self._wordEmbedding[i,:] = self.wordDict[word.lower()]\n",
        "    print('Word Embedding Reading done.')\n",
        "\n",
        "  def getWordEmbedding(self):\n",
        "    \"\"\"Return the matrix built by MakeWordEmbedding().\"\"\"\n",
        "    print('getWordEmbedding.')\n",
        "    return self._wordEmbedding\n",
        "\n",
        "def example_generator(data_path, single_pass):\n",
        "  \"\"\"Generates tf.Examples from data files.\n",
        "    Binary data format: <length><blob>. <length> represents the byte size\n",
        "    of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains\n",
        "    the tokenized article text and summary.\n",
        "  Args:\n",
        "    data_path:\n",
        "      Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.\n",
        "    single_pass:\n",
        "      Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.\n",
        "  Yields:\n",
        "    Deserialized tf.Example.\n",
        "  \"\"\"\n",
        "  while True:\n",
        "    filelist = glob.glob(data_path) # get the list of datafiles (glob imported at top of cell)\n",
        "    #assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty\n",
        "    if single_pass:\n",
        "      filelist = sorted(filelist)\n",
        "    else:\n",
        "      random.shuffle(filelist)\n",
        "    for f in filelist:\n",
        "      # 'with' guarantees the handle is closed; the original opened the file\n",
        "      # and never closed it, leaking one descriptor per file per epoch.\n",
        "      with open(f, 'rb') as reader:\n",
        "        while True:\n",
        "          len_bytes = reader.read(8)\n",
        "          if not len_bytes: break # finished reading this file\n",
        "          # Native-order signed 8-byte length prefix precedes each proto blob.\n",
        "          str_len = struct.unpack('q', len_bytes)[0]\n",
        "          example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]\n",
        "          yield example_pb2.Example.FromString(example_str)\n",
        "    if single_pass:\n",
        "      print(\"example_generator completed reading all datafiles. No more data.\")\n",
        "      break\n",
        "\n",
        "\n",
        "def article2ids(article_words, vocab):\n",
        "  \"\"\"Map the article words to their ids. Also return a list of OOVs in the article.\n",
        "  Args:\n",
        "    article_words: list of words (strings)\n",
        "    vocab: Vocabulary object\n",
        "  Returns:\n",
        "    ids:\n",
        "      A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.\n",
        "    oovs:\n",
        "      A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers.\"\"\"\n",
        "  ids, oovs = [], []\n",
        "  unk_id = vocab.word2id(UNKNOWN_TOKEN)\n",
        "  for word in article_words:\n",
        "    word_id = vocab.word2id(word)\n",
        "    if word_id != unk_id:\n",
        "      ids.append(word_id)\n",
        "      continue\n",
        "    # OOV: assign (or reuse) a temporary id just past the vocab range.\n",
        "    if word not in oovs:\n",
        "      oovs.append(word)\n",
        "    ids.append(vocab.size() + oovs.index(word))\n",
        "  return ids, oovs\n",
        "\n",
        "\n",
        "def abstract2ids(abstract_words, vocab, article_oovs):\n",
        "  \"\"\"Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.\n",
        "  Args:\n",
        "    abstract_words: list of words (strings)\n",
        "    vocab: Vocabulary object\n",
        "    article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers\n",
        "  Returns:\n",
        "    ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. Out-of-article OOV words are mapped to the UNK token id.\"\"\"\n",
        "  unk_id = vocab.word2id(UNKNOWN_TOKEN)\n",
        "  ids = []\n",
        "  for word in abstract_words:\n",
        "    word_id = vocab.word2id(word)\n",
        "    if word_id != unk_id:\n",
        "      ids.append(word_id)\n",
        "    elif word in article_oovs:\n",
        "      # In-article OOV: temporary id just past the vocab range.\n",
        "      ids.append(vocab.size() + article_oovs.index(word))\n",
        "    else:\n",
        "      # Out-of-article OOV: fall back to the UNK id.\n",
        "      ids.append(unk_id)\n",
        "  return ids\n",
        "\n",
        "\n",
        "def outputids2words(id_list, vocab, article_oovs):\n",
        "  \"\"\"Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode).\n",
        "  Args:\n",
        "    id_list: list of ids (integers)\n",
        "    vocab: Vocabulary object\n",
        "    article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode)\n",
        "  Returns:\n",
        "    words: list of words (strings)\n",
        "  \"\"\"\n",
        "  words = []\n",
        "  for i in id_list:\n",
        "    try:\n",
        "      w = vocab.id2word(i) # might be [UNK]\n",
        "    except ValueError: # w is OOV\n",
        "      assert article_oovs is not None, \"Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode\"\n",
        "      article_oov_idx = i - vocab.size()\n",
        "      # Bounds-check explicitly: list indexing raises IndexError, not\n",
        "      # ValueError, so the original except clause could never fire (and a\n",
        "      # negative index would silently pick a word from the end of the list).\n",
        "      if not 0 <= article_oov_idx < len(article_oovs):\n",
        "        raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))\n",
        "      w = article_oovs[article_oov_idx]\n",
        "    words.append(w)\n",
        "  return words\n",
        "\n",
        "\n",
        "def abstract2sents(abstract):\n",
        "  \"\"\"Splits abstract text from datafile into list of sentences.\n",
        "  Args:\n",
        "    abstract: string containing <s> and </s> tags for starts and ends of sentences\n",
        "  Returns:\n",
        "    sents: List of sentence strings (no tags)\"\"\"\n",
        "  abstract = abstract.decode(encoding=\"utf-8\", errors=\"strict\")\n",
        "  sents = []\n",
        "  pos = 0\n",
        "  while True:\n",
        "    try:\n",
        "      # Locate the next <s>...</s> pair and keep only the text between them.\n",
        "      tag_open = abstract.index(SENTENCE_START, pos)\n",
        "      tag_close = abstract.index(SENTENCE_END, tag_open + 1)\n",
        "    except ValueError: # no more sentences\n",
        "      return sents\n",
        "    sents.append(abstract[tag_open + len(SENTENCE_START):tag_close])\n",
        "    pos = tag_close + len(SENTENCE_END)\n",
        "\n",
        "\n",
        "def show_art_oovs(article, vocab):\n",
        "  \"\"\"Returns the article string, highlighting the OOVs by placing __underscores__ around them\"\"\"\n",
        "  unk_token = vocab.word2id(UNKNOWN_TOKEN)\n",
        "  marked = []\n",
        "  for w in article.split(' '):\n",
        "    marked.append(\"__%s__\" % w if vocab.word2id(w) == unk_token else w)\n",
        "  return ' '.join(marked)\n",
        "\n",
        "\n",
        "def show_abs_oovs(abstract, vocab, article_oovs):\n",
        "  \"\"\"Returns the abstract string, highlighting the article OOVs with __underscores__.\n",
        "  If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!.\n",
        "  Args:\n",
        "    abstract: string\n",
        "    vocab: Vocabulary object\n",
        "    article_oovs: list of words (strings), or None (in baseline mode)\n",
        "  \"\"\"\n",
        "  unk_token = vocab.word2id(UNKNOWN_TOKEN)\n",
        "  highlighted = []\n",
        "  for w in abstract.split(' '):\n",
        "    if vocab.word2id(w) != unk_token:\n",
        "      # In-vocab word: leave untouched.\n",
        "      highlighted.append(w)\n",
        "    elif article_oovs is None or w in article_oovs:\n",
        "      # Baseline mode, or an in-article OOV in pointer-generator mode.\n",
        "      highlighted.append(\"__%s__\" % w)\n",
        "    else:\n",
        "      # Out-of-article OOV in pointer-generator mode.\n",
        "      highlighted.append(\"!!__%s__!!\" % w)\n",
        "  return ' '.join(highlighted)"
      ],
      "execution_count": 6,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "<p style=\"color: red;\">\n",
              "The default version of TensorFlow in Colab will soon switch to TensorFlow 2.x.<br>\n",
              "We recommend you <a href=\"https://www.tensorflow.org/guide/migrate\" target=\"_blank\">upgrade</a> now \n",
              "or ensure your notebook will continue to use TensorFlow 1.x via the <code>%tensorflow_version 1.x</code> magic:\n",
              "<a href=\"https://colab.research.google.com/notebooks/tensorflow_version.ipynb\" target=\"_blank\">more info</a>.</p>\n"
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "oeWwMVL0Ic2r",
        "colab_type": "text"
      },
      "source": [
        "### Batcher"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "V493B7xoIeBs",
        "colab_type": "text"
      },
      "source": [
        "https://github.com/yaserkl/RLSeq2Seq/blob/master/src/batcher.py"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uH-PxZaBIfQL",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This file contains code to process data into batches\"\"\"\n",
        "try:\n",
        "  import queue\n",
        "except:\n",
        "  import Queue as queue\n",
        "from random import shuffle\n",
        "from random import seed\n",
        "seed(123)\n",
        "from threading import Thread\n",
        "import time\n",
        "import numpy as np\n",
        "import tensorflow as tf\n",
        "#import data\n",
        "\n",
        "FLAGS = tf.app.flags.FLAGS\n",
        "\n",
        "class Example(object):\n",
        "  \"\"\"Class representing a train/val/test example for text summarization.\"\"\"\n",
        "\n",
        "  def __init__(self, article, abstract_sentences, vocab, hps):\n",
        "    \"\"\"Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.\n",
        "    Args:\n",
        "      article: source text; a string. each token is separated by a single space.\n",
        "      abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.\n",
        "        NOTE(review): in this notebook the Batcher passes a single title string here, and it is consumed directly via .split() below -- confirm callers.\n",
        "      vocab: Vocabulary object\n",
        "      hps: hyperparameters\n",
        "    \"\"\"\n",
        "    self.hps = hps\n",
        "\n",
        "    # Get ids of special tokens\n",
        "    start_decoding = vocab.word2id(START_DECODING)\n",
        "    stop_decoding = vocab.word2id(STOP_DECODING)\n",
        "\n",
        "    # Process the article\n",
        "    article_words = article.split()\n",
        "    if len(article_words) > hps.max_enc_steps:\n",
        "      article_words = article_words[:hps.max_enc_steps]\n",
        "    self.enc_len = len(article_words) # store the length after truncation but before padding\n",
        "    self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token\n",
        "\n",
        "    # Process the abstract\n",
        "    # (abstract_sentences is used as one whitespace-separated string, not joined from a list)\n",
        "    abstract = abstract_sentences #' '.join(abstract_sentences) # string\n",
        "    abstract_words = abstract.split() # list of strings\n",
        "    abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token\n",
        "\n",
        "    # Get the decoder input sequence and target sequence\n",
        "    self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)\n",
        "    self.dec_len = len(self.dec_input)\n",
        "\n",
        "    # If using pointer-generator mode, we need to store some extra info\n",
        "    if hps.pointer_gen:\n",
        "      # Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves\n",
        "      self.enc_input_extend_vocab, self.article_oovs = article2ids(article_words, vocab)\n",
        "\n",
        "      # Get a version of the reference summary where in-article OOVs are represented by their temporary article OOV id\n",
        "      abs_ids_extend_vocab = abstract2ids(abstract_words, vocab, self.article_oovs)\n",
        "\n",
        "      # Overwrite decoder target sequence so it uses the temp article OOV ids\n",
        "      # (the returned input sequence is discarded: self.dec_input keeps the UNK-id version built above)\n",
        "      _, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)\n",
        "\n",
        "    # Store the original strings\n",
        "    self.original_article = article\n",
        "    self.original_abstract = abstract\n",
        "    self.original_abstract_sents = abstract_sentences\n",
        "\n",
        "\n",
        "  def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):\n",
        "    \"\"\"Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).\n",
        "    Args:\n",
        "      sequence: List of ids (integers)\n",
        "      max_len: integer\n",
        "      start_id: integer\n",
        "      stop_id: integer\n",
        "    Returns:\n",
        "      inp: sequence length <=max_len starting with start_id\n",
        "      target: sequence same length as input, ending with stop_id only if there was no truncation\n",
        "    \"\"\"\n",
        "    inp = [start_id] + sequence[:]\n",
        "    target = sequence[:]\n",
        "    if len(inp) > max_len: # truncate\n",
        "      inp = inp[:max_len]\n",
        "      target = target[:max_len] # no end_token\n",
        "    else: # no truncation\n",
        "      target.append(stop_id) # end token\n",
        "    assert len(inp) == len(target)\n",
        "    return inp, target\n",
        "\n",
        "\n",
        "  def pad_decoder_inp_targ(self, max_len, pad_id):\n",
        "    \"\"\"Pad decoder input and target sequences with pad_id up to max_len.\"\"\"\n",
        "    while len(self.dec_input) < max_len:\n",
        "      self.dec_input.append(pad_id)\n",
        "    while len(self.target) < max_len:\n",
        "      self.target.append(pad_id)\n",
        "\n",
        "\n",
        "  def pad_encoder_input(self, max_len, pad_id):\n",
        "    \"\"\"Pad the encoder input sequence with pad_id up to max_len.\"\"\"\n",
        "    while len(self.enc_input) < max_len:\n",
        "      self.enc_input.append(pad_id)\n",
        "    if self.hps.pointer_gen:\n",
        "      # Keep the extended-vocab copy padded to the same length as enc_input.\n",
        "      while len(self.enc_input_extend_vocab) < max_len:\n",
        "        self.enc_input_extend_vocab.append(pad_id)\n",
        "\n",
        "\n",
        "class Batch(object):\n",
        "  \"\"\"A minibatch of train/val/test examples for text summarization.\"\"\"\n",
        "\n",
        "  def __init__(self, example_list, hps, vocab):\n",
        "    \"\"\"Builds the batch arrays from a list of Example objects.\n",
        "    Args:\n",
        "       example_list: List of Example objects\n",
        "       hps: hyperparameters\n",
        "       vocab: Vocabulary object\n",
        "    \"\"\"\n",
        "    self.pad_id = vocab.word2id(PAD_TOKEN) # id of the PAD token used to pad sequences\n",
        "    self.init_encoder_seq(example_list, hps) # encoder-side arrays\n",
        "    self.init_decoder_seq(example_list, hps) # decoder input/target arrays\n",
        "    self.store_orig_strings(example_list) # keep the raw strings around\n",
        "\n",
        "  def init_encoder_seq(self, example_list, hps):\n",
        "    \"\"\"Builds self.enc_batch (batch_size x longest-sequence int ids, OOVs as UNK id),\n",
        "    self.enc_lens (truncated pre-padding lengths) and self.enc_padding_mask (1.0\n",
        "    over real tokens, 0.0 over padding). In pointer-generator mode additionally\n",
        "    builds self.max_art_oovs, self.art_oovs and self.enc_batch_extend_vocab (same\n",
        "    as enc_batch but with in-article OOVs mapped to temporary article OOV ids).\"\"\"\n",
        "    # Pad every example up to the longest (truncated) encoder length in the batch.\n",
        "    longest = max(ex.enc_len for ex in example_list)\n",
        "    for ex in example_list:\n",
        "      ex.pad_encoder_input(longest, self.pad_id)\n",
        "\n",
        "    # The second dimension may differ per batch: the encoder uses dynamic_rnn.\n",
        "    batch_size = hps.batch_size\n",
        "    self.enc_batch = np.zeros((batch_size, longest), dtype=np.int32)\n",
        "    self.enc_lens = np.zeros((batch_size), dtype=np.int32)\n",
        "    self.enc_padding_mask = np.zeros((batch_size, longest), dtype=np.float32)\n",
        "\n",
        "    for i, ex in enumerate(example_list):\n",
        "      self.enc_batch[i, :] = ex.enc_input[:]\n",
        "      self.enc_lens[i] = ex.enc_len\n",
        "      self.enc_padding_mask[i, :ex.enc_len] = 1\n",
        "\n",
        "    if hps.pointer_gen:\n",
        "      # Max number of in-article OOVs in this batch, plus the OOV words themselves.\n",
        "      self.max_art_oovs = max(len(ex.article_oovs) for ex in example_list)\n",
        "      self.art_oovs = [ex.article_oovs for ex in example_list]\n",
        "      # enc_batch variant where in-article OOVs carry temporary article OOV ids.\n",
        "      self.enc_batch_extend_vocab = np.zeros((batch_size, longest), dtype=np.int32)\n",
        "      for i, ex in enumerate(example_list):\n",
        "        self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]\n",
        "\n",
        "  def init_decoder_seq(self, example_list, hps):\n",
        "    \"\"\"Builds self.dec_batch, self.target_batch and self.dec_padding_mask, all of\n",
        "    fixed shape (batch_size, max_dec_steps); the mask is 1.0 over real tokens.\"\"\"\n",
        "    for ex in example_list:\n",
        "      ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)\n",
        "\n",
        "    # Decoder arrays are fixed-length: decoding does not use a dynamic_rnn.\n",
        "    shape = (hps.batch_size, hps.max_dec_steps)\n",
        "    self.dec_batch = np.zeros(shape, dtype=np.int32)\n",
        "    self.target_batch = np.zeros(shape, dtype=np.int32)\n",
        "    self.dec_padding_mask = np.zeros(shape, dtype=np.float32)\n",
        "\n",
        "    for i, ex in enumerate(example_list):\n",
        "      self.dec_batch[i, :] = ex.dec_input[:]\n",
        "      self.target_batch[i, :] = ex.target[:]\n",
        "      self.dec_padding_mask[i, :ex.dec_len] = 1\n",
        "\n",
        "  def store_orig_strings(self, example_list):\n",
        "    \"\"\"Store the original article and abstract strings in the Batch object.\"\"\"\n",
        "    self.original_articles = [ex.original_article for ex in example_list]\n",
        "    self.original_abstracts = [ex.original_abstract for ex in example_list]\n",
        "    self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list]\n",
        "\n",
        "\n",
        "class Batcher(object):\n",
        "  \"\"\"A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence.\"\"\"\n",
        "\n",
        "  BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold\n",
        "\n",
        "  def __init__(self, data_path,csv, vocab, hps, single_pass, decode_after):\n",
        "    \"\"\"Initialize the batcher. Start threads that process the data into batches.\n",
        "    Args:\n",
        "      data_path: tf.Example filepattern (kept for compatibility; unused when reading from csv).\n",
        "      csv: pandas DataFrame holding the dataset; fill_example_queue reads the 'सेtext' and 'title' columns.\n",
        "      vocab: Vocabulary object\n",
        "      hps: hyperparameters\n",
        "      single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).\n",
        "      decode_after: number of already-decoded documents to skip (used by text_generator in single_pass mode).\n",
        "    \"\"\"\n",
        "    self._data_path = data_path\n",
        "    self._csv = csv\n",
        "    self._vocab = vocab\n",
        "    self._hps = hps\n",
        "    self._single_pass = single_pass\n",
        "    self._decode_after = decode_after\n",
        "\n",
        "    # Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched\n",
        "    self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)\n",
        "    self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)\n",
        "\n",
        "    # Different settings depending on whether we're in single_pass mode or not\n",
        "    if single_pass:\n",
        "      self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once\n",
        "      self._num_batch_q_threads = 1  # just one thread to batch examples\n",
        "      self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing\n",
        "      self._finished_reading = False # this will tell us when we're finished reading the dataset\n",
        "    else:\n",
        "      self._num_example_q_threads = FLAGS.example_queue_threads # num threads to fill example queue\n",
        "      self._num_batch_q_threads = FLAGS.batch_queue_threads  # num threads to fill batch queue\n",
        "      self._bucketing_cache_size = FLAGS.bucketing_cache_size # how many batches-worth of examples to load into cache before bucketing\n",
        "\n",
        "    # Start the threads that load the queues\n",
        "    self._example_q_threads = []\n",
        "    for _ in range(self._num_example_q_threads):\n",
        "      self._example_q_threads.append(Thread(target=self.fill_example_queue))\n",
        "      self._example_q_threads[-1].daemon = True\n",
        "      self._example_q_threads[-1].start()\n",
        "    self._batch_q_threads = []\n",
        "    for _ in range(self._num_batch_q_threads):\n",
        "      self._batch_q_threads.append(Thread(target=self.fill_batch_queue))\n",
        "      self._batch_q_threads[-1].daemon = True\n",
        "      self._batch_q_threads[-1].start()\n",
        "\n",
        "    # Start a thread that watches the other threads and restarts them if they're dead\n",
        "    if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever\n",
        "      self._watch_thread = Thread(target=self.watch_threads)\n",
        "      self._watch_thread.daemon = True\n",
        "      self._watch_thread.start()\n",
        "\n",
        "  def next_batch(self):\n",
        "    \"\"\"Return a Batch from the batch queue.\n",
        "    If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.\n",
        "    Returns:\n",
        "      batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.\n",
        "    \"\"\"\n",
        "    # If the batch queue is empty, print a warning\n",
        "    if self._batch_queue.qsize() == 0:\n",
        "      tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())\n",
        "      if self._single_pass and self._finished_reading:\n",
        "        tf.logging.info(\"Finished reading dataset in single_pass mode.\")\n",
        "        return None\n",
        "\n",
        "    batch = self._batch_queue.get() # get the next Batch\n",
        "    return batch\n",
        "\n",
        "  def fill_example_queue(self):\n",
        "    \"\"\"Reads rows from the csv DataFrame and processes them into Examples which are then placed into the example queue.\"\"\"\n",
        "    for index, row in self._csv.iterrows():\n",
        "      # Skip rows with an empty article or title.\n",
        "      # NOTE(review): a NaN cell compares unequal to \"\" and would slip through this\n",
        "      # check, then fail inside Example -- confirm the csv is cleaned upstream.\n",
        "      if (row['सेtext'] != \"\" and  row['title'] != \"\"):\n",
        "        article = row['सेtext']\n",
        "        abstract_sentences = row['title']\n",
        "        example = Example(article, abstract_sentences, self._vocab, self._hps) # Process into an Example.\n",
        "        self._example_queue.put(example) # place the Example in the example queue.\n",
        "\n",
        "    # The csv is finite (unlike the original tf.Example generator), so flag\n",
        "    # exhaustion: next_batch() relies on _finished_reading to return None in\n",
        "    # single_pass mode. In training mode this thread simply exits and\n",
        "    # watch_threads() restarts it, re-reading the csv from the beginning.\n",
        "    if self._single_pass:\n",
        "      tf.logging.info(\"single_pass mode is on, so we've finished reading dataset. This thread is stopping.\")\n",
        "      self._finished_reading = True\n",
        "\n",
        "  def fill_batch_queue(self):\n",
        "    \"\"\"Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.\n",
        "    In decode mode, makes batches that each contain a single example repeated.\n",
        "    \"\"\"\n",
        "    while True:\n",
        "      if self._hps.mode != 'decode':\n",
        "        # Get bucketing_cache_size-many batches of Examples into a list, then sort\n",
        "        inputs = []\n",
        "        for _ in range(self._hps.batch_size * self._bucketing_cache_size):\n",
        "          inputs.append(self._example_queue.get())\n",
        "        inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence\n",
        "\n",
        "        # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\n",
        "        batches = []\n",
        "        for i in range(0, len(inputs), self._hps.batch_size):\n",
        "          batches.append(inputs[i:i + self._hps.batch_size])\n",
        "        if not self._single_pass:\n",
        "          shuffle(batches)\n",
        "        for b in batches:  # each b is a list of Example objects\n",
        "          self._batch_queue.put(Batch(b, self._hps, self._vocab))\n",
        "\n",
        "      else: # beam search decode mode\n",
        "        ex = self._example_queue.get()\n",
        "        b = [ex for _ in range(self._hps.batch_size)]\n",
        "        self._batch_queue.put(Batch(b, self._hps, self._vocab))\n",
        "\n",
        "  def watch_threads(self):\n",
        "    \"\"\"Watch example queue and batch queue threads and restart if dead.\"\"\"\n",
        "    while True:\n",
        "      time.sleep(60)\n",
        "      for idx,t in enumerate(self._example_q_threads):\n",
        "        if not t.is_alive(): # if the thread is dead\n",
        "          tf.logging.error('Found example queue thread dead. Restarting.')\n",
        "          new_t = Thread(target=self.fill_example_queue)\n",
        "          self._example_q_threads[idx] = new_t\n",
        "          new_t.daemon = True\n",
        "          new_t.start()\n",
        "      for idx,t in enumerate(self._batch_q_threads):\n",
        "        if not t.is_alive(): # if the thread is dead\n",
        "          tf.logging.error('Found batch queue thread dead. Restarting.')\n",
        "          new_t = Thread(target=self.fill_batch_queue)\n",
        "          self._batch_q_threads[idx] = new_t\n",
        "          new_t.daemon = True\n",
        "          new_t.start()\n",
        "\n",
        "  def text_generator(self, example_generator):\n",
        "    \"\"\"Generates article and abstract text from tf.Example.\n",
        "    Args:\n",
        "      example_generator: a generator of tf.Examples from file. See data.example_generator\"\"\"\n",
        "    cnt = 0\n",
        "    while True:\n",
        "      e = next(example_generator) ##example_generator.next() # e is a tf.Example\n",
        "      try:\n",
        "        article_text = e.features.feature['article'].bytes_list.value[0] # the article text was saved under the key 'article' in the data files\n",
        "        abstract_text = e.features.feature['abstract'].bytes_list.value[0] # the abstract text was saved under the key 'abstract' in the data files\n",
        "      except ValueError:\n",
        "        tf.logging.error('Failed to get article or abstract from example')\n",
        "        continue\n",
        "      if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1\n",
        "        tf.logging.warning('Found an example with empty article text. Skipping it.')\n",
        "      else:\n",
        "        if self._single_pass and cnt < self._decode_after: #skip already decoded docs\n",
        "          cnt +=1\n",
        "          continue\n",
        "        yield (article_text, abstract_text)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "oapGiq-KLiFL",
        "colab_type": "text"
      },
      "source": [
        "### Rouge"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "d86tpuvTLjTD",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# -*- coding: utf-8 -*-\n",
        "# Copyright 2017 Google Inc.\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#      http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "\"\"\"ROUGe metric implementation.\n",
        "This is a modified and slightly extended version of\n",
        "https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py.\n",
        "\"\"\"\n",
        "\n",
        "from __future__ import absolute_import\n",
        "from __future__ import division\n",
        "from __future__ import print_function\n",
        "from __future__ import unicode_literals\n",
        "\n",
        "import itertools\n",
        "import numpy as np\n",
        "\n",
        "#pylint: disable=C0103\n",
        "\n",
        "\n",
        "def _get_ngrams(n, text):\n",
        "  \"\"\"Calcualtes n-grams.\n",
        "  Args:\n",
        "    n: which n-grams to calculate\n",
        "    text: An array of tokens\n",
        "  Returns:\n",
        "    A set of n-grams\n",
        "  \"\"\"\n",
        "  ngram_set = set()\n",
        "  text_length = len(text)\n",
        "  max_index_ngram_start = text_length - n\n",
        "  for i in range(max_index_ngram_start + 1):\n",
        "    ngram_set.add(tuple(text[i:i + n]))\n",
        "  return ngram_set\n",
        "\n",
        "def _split_into_words(sentences):\n",
        "  \"\"\"Splits multiple sentences into words and flattens the result\"\"\"\n",
        "  return list(itertools.chain(*[_.split(\" \") for _ in sentences]))\n",
        "\n",
        "def _get_word_ngrams(n, sentences):\n",
        "  \"\"\"Calculates word n-grams over the concatenated words of multiple sentences.\"\"\"\n",
        "  assert len(sentences) > 0\n",
        "  assert n > 0\n",
        "  return _get_ngrams(n, _split_into_words(sentences))\n",
        "\n",
        "def _len_lcs(x, y):\n",
        "  \"\"\"\n",
        "  Returns the length of the Longest Common Subsequence between sequences x\n",
        "  and y.\n",
        "  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n",
        "  Args:\n",
        "    x: sequence of words\n",
        "    y: sequence of words\n",
        "  Returns\n",
        "    integer: Length of LCS between x and y\n",
        "  \"\"\"\n",
        "  # The bottom-right cell of the DP table holds the full-sequence LCS length.\n",
        "  return _lcs(x, y)[len(x), len(y)]\n",
        "\n",
        "def _lcs(x, y):\n",
        "  \"\"\"\n",
        "  Computes the length of the longest common subsequence (lcs) between two\n",
        "  strings. The implementation below uses a DP programming algorithm and runs\n",
        "  in O(nm) time where n = len(x) and m = len(y).\n",
        "  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n",
        "  Args:\n",
        "    x: collection of words\n",
        "    y: collection of words\n",
        "  Returns:\n",
        "    Table of dictionary of coord and len lcs\n",
        "  \"\"\"\n",
        "  n, m = len(x), len(y)\n",
        "  table = dict()\n",
        "  for i in range(n + 1):\n",
        "    for j in range(m + 1):\n",
        "      if i == 0 or j == 0:\n",
        "        table[i, j] = 0\n",
        "      elif x[i - 1] == y[j - 1]:\n",
        "        table[i, j] = table[i - 1, j - 1] + 1\n",
        "      else:\n",
        "        table[i, j] = max(table[i - 1, j], table[i, j - 1])\n",
        "  return table\n",
        "\n",
        "def _recon_lcs(x, y):\n",
        "  \"\"\"\n",
        "  Returns the Longest Common Subsequence between x and y.\n",
        "  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n",
        "  Args:\n",
        "    x: sequence of words\n",
        "    y: sequence of words\n",
        "  Returns:\n",
        "    tuple: tokens forming the LCS of x and y\n",
        "  \"\"\"\n",
        "  table = _lcs(x, y)\n",
        "\n",
        "  def _backtrack(i, j):\n",
        "    \"\"\"Walks the DP table back from (i, j), collecting (token, index) pairs.\"\"\"\n",
        "    if i == 0 or j == 0:\n",
        "      return []\n",
        "    if x[i - 1] == y[j - 1]:\n",
        "      return _backtrack(i - 1, j - 1) + [(x[i - 1], i)]\n",
        "    if table[i - 1, j] > table[i, j - 1]:\n",
        "      return _backtrack(i - 1, j)\n",
        "    return _backtrack(i, j - 1)\n",
        "\n",
        "  return tuple(pair[0] for pair in _backtrack(len(x), len(y)))\n",
        "\n",
        "def rouge_n(evaluated_sentences, reference_sentences, n=2):\n",
        "  \"\"\"\n",
        "  Computes ROUGE-N of two text collections of sentences.\n",
        "  Source: http://research.microsoft.com/en-us/um/people/cyl/download/\n",
        "  papers/rouge-working-note-v1.3.1.pdf\n",
        "  Args:\n",
        "    evaluated_sentences: The sentences that have been picked by the summarizer\n",
        "    reference_sentences: The sentences from the reference set\n",
        "    n: Size of ngram.  Defaults to 2.\n",
        "  Returns:\n",
        "    A tuple (f1, precision, recall) for ROUGE-N\n",
        "  Raises:\n",
        "    ValueError: raises exception if a param has len <= 0\n",
        "  \"\"\"\n",
        "  if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n",
        "    raise ValueError(\"Collections must contain at least 1 sentence.\")\n",
        "\n",
        "  evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)\n",
        "  reference_ngrams = _get_word_ngrams(n, reference_sentences)\n",
        "\n",
        "  # Overlap between candidate and reference n-gram sets.\n",
        "  overlapping_count = len(evaluated_ngrams & reference_ngrams)\n",
        "  evaluated_count = len(evaluated_ngrams)\n",
        "  reference_count = len(reference_ngrams)\n",
        "\n",
        "  # Edge-case guards: not mathematically exact, but good enough in practice.\n",
        "  precision = overlapping_count / evaluated_count if evaluated_count else 0.0\n",
        "  recall = overlapping_count / reference_count if reference_count else 0.0\n",
        "\n",
        "  # Smoothed harmonic mean; the 1e-8 term avoids division by zero.\n",
        "  f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))\n",
        "  return f1_score, precision, recall\n",
        "\n",
        "def _f_p_r_lcs(llcs, m, n):\n",
        "  \"\"\"\n",
        "  Computes the LCS-based F-measure score\n",
        "  Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n",
        "  rouge-working-note-v1.3.1.pdf\n",
        "  Args:\n",
        "    llcs: Length of LCS\n",
        "    m: number of words in reference summary\n",
        "    n: number of words in candidate summary\n",
        "  Returns:\n",
        "    Float. LCS-based F-measure score\n",
        "  \"\"\"\n",
        "  r_lcs = llcs / m\n",
        "  p_lcs = llcs / n\n",
        "  beta = p_lcs / (r_lcs + 1e-12)\n",
        "  num = (1 + (beta**2)) * r_lcs * p_lcs\n",
        "  denom = r_lcs + ((beta**2) * p_lcs)\n",
        "  f_lcs = num / (denom + 1e-12)\n",
        "  return f_lcs, p_lcs, r_lcs\n",
        "\n",
        "\n",
        "def rouge_l_sentence_level(evaluated_sentences, reference_sentences):\n",
        "  \"\"\"\n",
        "  Computes ROUGE-L (sentence level) of two text collections of sentences.\n",
        "  http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n",
        "  rouge-working-note-v1.3.1.pdf\n",
        "  Calculated according to:\n",
        "  R_lcs = LCS(X,Y)/m\n",
        "  P_lcs = LCS(X,Y)/n\n",
        "  F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n",
        "  where:\n",
        "  X = reference summary\n",
        "  Y = Candidate summary\n",
        "  m = length of reference summary\n",
        "  n = length of candidate summary\n",
        "  Args:\n",
        "    evaluated_sentences: The sentences that have been picked by the summarizer\n",
        "    reference_sentences: The sentences from the reference set\n",
        "  Returns:\n",
        "    A tuple (f_lcs, p_lcs, r_lcs), as returned by _f_p_r_lcs\n",
        "  Raises:\n",
        "    ValueError: raises exception if a param has len <= 0\n",
        "  \"\"\"\n",
        "  if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n",
        "    raise ValueError(\"Collections must contain at least 1 sentence.\")\n",
        "  reference_words = _split_into_words(reference_sentences)\n",
        "  evaluated_words = _split_into_words(evaluated_sentences)\n",
        "  # m and n are total word counts; they feed the F-measure denominators.\n",
        "  m = len(reference_words)\n",
        "  n = len(evaluated_words)\n",
        "  lcs = _len_lcs(evaluated_words, reference_words)\n",
        "  return _f_p_r_lcs(lcs, m, n)\n",
        "\n",
        "\n",
        "def _union_lcs(evaluated_sentences, reference_sentence):\n",
        "  \"\"\"\n",
        "  Returns LCS_u(r_i, C) which is the LCS score of the union longest common\n",
        "  subsequence between reference sentence ri and candidate summary C. For example\n",
        "  if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and\n",
        "  c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is\n",
        "  “w1 w2” and the longest common subsequence of r_i and c2 is “w1 w3 w5”. The\n",
        "  union longest common subsequence of r_i, c1, and c2 is “w1 w2 w3 w5” and\n",
        "  LCS_u(r_i, C) = 4/5.\n",
        "  Args:\n",
        "    evaluated_sentences: The sentences that have been picked by the summarizer\n",
        "    reference_sentence: One of the sentences in the reference summaries\n",
        "  Returns:\n",
        "    float: LCS_u(r_i, C). 0.0 when no candidate sentence shares any\n",
        "    subsequence with the reference sentence.\n",
        "  ValueError:\n",
        "    Raises exception if a param has len <= 0\n",
        "  \"\"\"\n",
        "  if len(evaluated_sentences) <= 0:\n",
        "    raise ValueError(\"Collections must contain at least 1 sentence.\")\n",
        "\n",
        "  lcs_union = set()\n",
        "  reference_words = _split_into_words([reference_sentence])\n",
        "  combined_lcs_length = 0\n",
        "  for eval_s in evaluated_sentences:\n",
        "    evaluated_words = _split_into_words([eval_s])\n",
        "    lcs = set(_recon_lcs(reference_words, evaluated_words))\n",
        "    combined_lcs_length += len(lcs)\n",
        "    lcs_union = lcs_union.union(lcs)\n",
        "\n",
        "  union_lcs_count = len(lcs_union)\n",
        "  # Guard: when every per-sentence LCS is empty, combined_lcs_length is 0 and\n",
        "  # the division below would raise ZeroDivisionError; the score is 0 then.\n",
        "  if combined_lcs_length == 0:\n",
        "    return 0.0\n",
        "  union_lcs_value = union_lcs_count / combined_lcs_length\n",
        "  return union_lcs_value\n",
        "\n",
        "\n",
        "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n",
        "  \"\"\"\n",
        "  Computes ROUGE-L (summary level) of two text collections of sentences.\n",
        "  http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n",
        "  rouge-working-note-v1.3.1.pdf\n",
        "  Calculated according to:\n",
        "  R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m\n",
        "  P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n\n",
        "  F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n",
        "  where:\n",
        "  SUM(i,u) = SUM from i through u\n",
        "  u = number of sentences in reference summary\n",
        "  C = Candidate summary made up of v sentences\n",
        "  m = number of words in reference summary\n",
        "  n = number of words in candidate summary\n",
        "  Args:\n",
        "    evaluated_sentences: The sentences that have been picked by the summarizer\n",
        "    reference_sentences: The sentences from the reference set\n",
        "  Returns:\n",
        "    A tuple (f_lcs, p_lcs, r_lcs), as returned by _f_p_r_lcs\n",
        "  Raises:\n",
        "    ValueError: raises exception if a param has len <= 0\n",
        "  \"\"\"\n",
        "  if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n",
        "    raise ValueError(\"Collections must contain at least 1 sentence.\")\n",
        "\n",
        "  # total number of words in reference sentences\n",
        "  m = len(_split_into_words(reference_sentences))\n",
        "\n",
        "  # total number of words in evaluated sentences\n",
        "  n = len(_split_into_words(evaluated_sentences))\n",
        "\n",
        "  # Sum the per-reference-sentence union-LCS scores before the F-measure.\n",
        "  union_lcs_sum_across_all_references = 0\n",
        "  for ref_s in reference_sentences:\n",
        "    union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n",
        "                                                      ref_s)\n",
        "  return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)\n",
        "\n",
        "\n",
        "def rouge(hypotheses, references):\n",
        "  \"\"\"Calculates average rouge scores for a list of hypotheses and\n",
        "  references\"\"\"\n",
        "\n",
        "  # Filter out hyps that are of 0 length\n",
        "  # hyps_and_refs = zip(hypotheses, references)\n",
        "  # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]\n",
        "  # hypotheses, references = zip(*hyps_and_refs)\n",
        "\n",
        "  def _corpus_mean(per_pair_scores):\n",
        "    \"\"\"Averages a list of per-pair (f, p, r) tuples component-wise.\"\"\"\n",
        "    return [np.mean(component) for component in zip(*per_pair_scores)]\n",
        "\n",
        "  # Average ROUGE-1 F1, precision and recall over all pairs\n",
        "  rouge_1_f, rouge_1_p, rouge_1_r = _corpus_mean(\n",
        "      [rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)])\n",
        "\n",
        "  # Average ROUGE-2 F1, precision and recall over all pairs\n",
        "  rouge_2_f, rouge_2_p, rouge_2_r = _corpus_mean(\n",
        "      [rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)])\n",
        "\n",
        "  # Average ROUGE-L (sentence level) F1, precision and recall over all pairs\n",
        "  rouge_l_f, rouge_l_p, rouge_l_r = _corpus_mean(\n",
        "      [rouge_l_sentence_level([hyp], [ref])\n",
        "       for hyp, ref in zip(hypotheses, references)])\n",
        "\n",
        "  return {\n",
        "      \"rouge_1/f_score\": rouge_1_f,\n",
        "      \"rouge_1/r_score\": rouge_1_r,\n",
        "      \"rouge_1/p_score\": rouge_1_p,\n",
        "      \"rouge_2/f_score\": rouge_2_f,\n",
        "      \"rouge_2/r_score\": rouge_2_r,\n",
        "      \"rouge_2/p_score\": rouge_2_p,\n",
        "      \"rouge_l/f_score\": rouge_l_f,\n",
        "      \"rouge_l/r_score\": rouge_l_r,\n",
        "      \"rouge_l/p_score\": rouge_l_p,\n",
        "  }"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "XpqwdCAVK5uE",
        "colab_type": "text"
      },
      "source": [
        "### Rouge Tensor"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "YtrJ_qoPK7lb",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# coding=utf-8\n",
        "# Copyright 2018 The Tensor2Tensor Authors.\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# coding=utf-8\n",
        "\"\"\"ROUGE metric implementation.\n",
        "This is a modified and slightly extended version of\n",
        "https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py.\n",
        "\"\"\"\n",
        "\n",
        "from __future__ import absolute_import\n",
        "from __future__ import division\n",
        "from __future__ import print_function\n",
        "from __future__ import unicode_literals\n",
        "\n",
        "import numpy as np\n",
        "\n",
        "import tensorflow as tf\n",
        "\n",
        "\n",
        "def _len_lcs(x, y):\n",
        "  \"\"\"Returns the length of the Longest Common Subsequence between two seqs.\n",
        "  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n",
        "  Args:\n",
        "    x: sequence of words\n",
        "    y: sequence of words\n",
        "  Returns:\n",
        "    integer: Length of LCS between x and y\n",
        "  \"\"\"\n",
        "  # The bottom-right cell of the DP table holds the full-sequence LCS length.\n",
        "  rows, cols = len(x), len(y)\n",
        "  return _lcs(x, y)[rows, cols]\n",
        "\n",
        "\n",
        "def _lcs(x, y):\n",
        "  \"\"\"Computes the length of the LCS between two seqs.\n",
        "  The implementation below uses a DP programming algorithm and runs\n",
        "  in O(nm) time where n = len(x) and m = len(y).\n",
        "  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n",
        "  Args:\n",
        "    x: collection of words\n",
        "    y: collection of words\n",
        "  Returns:\n",
        "    Table of dictionary of coord and len lcs\n",
        "  \"\"\"\n",
        "  n, m = len(x), len(y)\n",
        "  table = dict()\n",
        "  for i in range(n + 1):\n",
        "    for j in range(m + 1):\n",
        "      if i == 0 or j == 0:\n",
        "        table[i, j] = 0\n",
        "      elif x[i - 1] == y[j - 1]:\n",
        "        table[i, j] = table[i - 1, j - 1] + 1\n",
        "      else:\n",
        "        table[i, j] = max(table[i - 1, j], table[i, j - 1])\n",
        "  return table\n",
        "\n",
        "\n",
        "def _f_lcs(llcs, m, n):\n",
        "  \"\"\"Computes the LCS-based F-measure score.\n",
        "  Source: https://www.microsoft.com/en-us/research/publication/\n",
        "  rouge-a-package-for-automatic-evaluation-of-summaries/\n",
        "  Args:\n",
        "    llcs: Length of LCS\n",
        "    m: number of words in reference summary\n",
        "    n: number of words in candidate summary\n",
        "  Returns:\n",
        "    Float. LCS-based F-measure score\n",
        "  \"\"\"\n",
        "  r_lcs = llcs / m\n",
        "  p_lcs = llcs / n\n",
        "  beta = p_lcs / (r_lcs + 1e-12)\n",
        "  num = (1 + (beta**2)) * r_lcs * p_lcs\n",
        "  denom = r_lcs + ((beta**2) * p_lcs)\n",
        "  f_lcs = num / (denom + 1e-12)\n",
        "  return f_lcs\n",
        "\n",
        "\n",
        "def rouge_l_sentence_level(eval_sentences, ref_sentences):\n",
        "  \"\"\"Computes ROUGE-L (sentence level) of two collections of sentences.\n",
        "  Source: https://www.microsoft.com/en-us/research/publication/\n",
        "  rouge-a-package-for-automatic-evaluation-of-summaries/\n",
        "  Calculated according to:\n",
        "  R_lcs = LCS(X,Y)/m\n",
        "  P_lcs = LCS(X,Y)/n\n",
        "  F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n",
        "  where:\n",
        "  X = reference summary\n",
        "  Y = Candidate summary\n",
        "  m = length of reference summary\n",
        "  n = length of candidate summary\n",
        "  Args:\n",
        "    eval_sentences: The sentences that have been picked by the summarizer\n",
        "    ref_sentences: The sentences from the reference set\n",
        "  Returns:\n",
        "    A numpy float32 array with one F_lcs score per sentence pair\n",
        "  \"\"\"\n",
        "\n",
        "  f1_scores = []\n",
        "  # Pairs are scored independently; m and n are the raw sequence lengths.\n",
        "  # NOTE(review): presumably the elements are token/id sequences aligned by\n",
        "  # position -- confirm with the tf.py_func caller.\n",
        "  for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n",
        "    m = len(ref_sentence)\n",
        "    n = len(eval_sentence)\n",
        "    lcs = _len_lcs(eval_sentence, ref_sentence)\n",
        "    f1_scores.append(_f_lcs(lcs, m, n))\n",
        "  return np.array(f1_scores).astype(np.float32)\n",
        "\n",
        "\n",
        "def rouge_l_fscore(hypothesis, references, **unused_kwargs):\n",
        "  \"\"\"ROUGE scores computation between labels and predictions.\n",
        "  This is an approximate ROUGE scoring method since we do not glue word pieces\n",
        "  or decode the ids and tokenize the output.\n",
        "  Args:\n",
        "    hypothesis: tensor, model predictions (batch_size, <=max_dec_steps)\n",
        "    references: tensor, gold output. (batch_size, max_dec_steps)\n",
        "  Returns:\n",
        "    rouge_l_fscore: approx rouge-l f1 score.\n",
        "  \"\"\"\n",
        "  # tf.py_func wraps the numpy implementation above; with Tout=[tf.float32]\n",
        "  # it yields a list containing one float32 tensor of per-example scores.\n",
        "  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (hypothesis, references), [tf.float32])\n",
        "  return rouge_l_f_score\n",
        "\n",
        "\n",
        "def _get_ngrams(n, text):\n",
        "  \"\"\"Calculates n-grams.\n",
        "  Args:\n",
        "    n: which n-grams to calculate\n",
        "    text: An array of tokens\n",
        "  Returns:\n",
        "    A set of n-grams\n",
        "  \"\"\"\n",
        "  ngram_set = set()\n",
        "  text_length = len(text)\n",
        "  max_index_ngram_start = text_length - n\n",
        "  for i in range(max_index_ngram_start + 1):\n",
        "    ngram_set.add(tuple(text[i:i + n]))\n",
        "  return ngram_set\n",
        "\n",
        "\n",
        "def rouge_n(eval_sentences, ref_sentences, n=2):\n",
        "  \"\"\"Computes ROUGE-N f1 score of two text collections of sentences.\n",
        "  Source: https://www.microsoft.com/en-us/research/publication/\n",
        "  rouge-a-package-for-automatic-evaluation-of-summaries/\n",
        "  Args:\n",
        "    eval_sentences: The sentences that have been picked by the summarizer\n",
        "    ref_sentences: The sentences from the reference set\n",
        "    n: Size of ngram.  Defaults to 2.\n",
        "  Returns:\n",
        "    A numpy float32 array with one ROUGE-N f1 score per sentence pair\n",
        "  \"\"\"\n",
        "\n",
        "  f1_scores = []\n",
        "  for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n",
        "    eval_ngrams = _get_ngrams(n, eval_sentence)\n",
        "    ref_ngrams = _get_ngrams(n, ref_sentence)\n",
        "    ref_count = len(ref_ngrams)\n",
        "    eval_count = len(eval_ngrams)\n",
        "\n",
        "    # Gets the overlapping ngrams between evaluated and reference\n",
        "    overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)\n",
        "    overlapping_count = len(overlapping_ngrams)\n",
        "\n",
        "    # Handle edge case. This isn't mathematically correct, but it's good enough\n",
        "    if eval_count == 0:\n",
        "      precision = 0.0\n",
        "    else:\n",
        "      precision = overlapping_count / eval_count\n",
        "\n",
        "    if ref_count == 0:\n",
        "      recall = 0.0\n",
        "    else:\n",
        "      recall = overlapping_count / ref_count\n",
        "\n",
        "    # 1e-8 keeps the denominator non-zero when precision == recall == 0\n",
        "    f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))\n",
        "\n",
        "  # return overlapping_count / reference_count\n",
        "  return np.array(f1_scores).astype(np.float32)\n",
        "\n",
        "\n",
        "def rouge_2_fscore(predictions, labels, **unused_kwargs):\n",
        "  \"\"\"ROUGE-2 F1 score computation between labels and predictions.\n",
        "  This is an approximate ROUGE scoring method since we do not glue word pieces\n",
        "  or decode the ids and tokenize the output.\n",
        "  Args:\n",
        "    predictions: tensor, model predictions (batch_size, <=max_dec_steps)\n",
        "    labels: tensor, gold output. (batch_size, max_dec_steps)\n",
        "  Returns:\n",
        "    A tuple of (approx rouge-2 f1 score, tf.constant(1.0)). The constant is\n",
        "    presumably a weight expected by the caller -- confirm at the call site.\n",
        "  \"\"\"\n",
        "\n",
        "  # py_func with Tout=[tf.float32] yields a list holding one float32 tensor.\n",
        "  rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), [tf.float32])\n",
        "  return rouge_2_f_score, tf.constant(1.0)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Fxvwy-T5KvIz",
        "colab_type": "text"
      },
      "source": [
        "### Attention Decoder"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XbSu87w-KxiM",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This file defines the decoder\"\"\"\n",
        "\n",
        "import tensorflow as tf\n",
        "from tensorflow.python.ops import variable_scope\n",
        "from tensorflow.python.ops import array_ops\n",
        "from tensorflow.python.ops import nn_ops\n",
        "from tensorflow.python.ops import gen_array_ops\n",
        "from tensorflow.python.ops import math_ops\n",
        "from tensorflow.python.ops.distributions import categorical\n",
        "from tensorflow.python.ops.distributions import bernoulli\n",
        "#from rouge_tensor import rouge_l_fscore\n",
        "\n",
        "FLAGS = tf.app.flags.FLAGS\n",
        "\n",
        "def print_shape(str, var):\n",
        "  \"\"\"Logs the static shape of tensor `var`, labelled by `str`.\"\"\"\n",
        "  tf.logging.info('shape of {}: {}'.format(str, list(var.get_shape())))\n",
        "\n",
        "import sys\n",
        "def add_epsilon(dist, epsilon=sys.float_info.epsilon):\n",
        "  \"\"\"Returns dist shifted up by epsilon everywhere (avoids exact zeros).\"\"\"\n",
        "  return dist + tf.ones_like(dist) * epsilon\n",
        "    \n",
        "def _calc_final_dist(_hps, v_size, _max_art_oovs, _enc_batch_extend_vocab, p_gen, vocab_dist, attn_dist):\n",
        "  \"\"\"Calculate the final distribution, for the pointer-generator model\n",
        "  Args:\n",
        "    _hps: hyperparameters object; only _hps.batch_size is read here.\n",
        "    v_size: size of the fixed decoder vocabulary.\n",
        "    _max_art_oovs: the maximum (over the batch) number of in-article OOV words.\n",
        "    _enc_batch_extend_vocab: encoder token ids in the extended vocabulary, shape (batch_size, enc_len).\n",
        "    p_gen: generation probability for this decoder step.\n",
        "    vocab_dist: the vocabulary distribution for this step, shape (batch_size, v_size).\n",
        "    attn_dist: the attention distribution over encoder positions for this step, shape (batch_size, enc_len).\n",
        "  Returns:\n",
        "    final_dist: the final re-normalized distribution for this step, shape (batch_size, v_size + _max_art_oovs).\n",
        "  \"\"\"\n",
        "  with tf.variable_scope('final_distribution'):\n",
        "    # Multiply vocab dists by p_gen and attention dists by (1-p_gen)\n",
        "    vocab_dist = p_gen * vocab_dist\n",
        "    attn_dist = (1-p_gen) * attn_dist\n",
        "\n",
        "    # Concatenate some zeros to each vocabulary dist, to hold the probabilities for in-article OOV words\n",
        "    extended_vsize = v_size + _max_art_oovs # the maximum (over the batch) size of the extended vocabulary\n",
        "    extra_zeros = tf.zeros((_hps.batch_size, _max_art_oovs))\n",
        "    vocab_dists_extended = tf.concat(axis=1, values=[vocab_dist, extra_zeros]) # list length max_dec_steps of shape (batch_size, extended_vsize)\n",
        "\n",
        "    # Project the values in the attention distributions onto the appropriate entries in the final distributions\n",
        "    # This means that if a_i = 0.1 and the ith encoder word is w, and w has index 500 in the vocabulary, then we add 0.1 onto the 500th entry of the final distribution\n",
        "    # This is done for each decoder timestep.\n",
        "    # This is fiddly; we use tf.scatter_nd to do the projection\n",
        "    batch_nums = tf.range(0, limit=_hps.batch_size) # shape (batch_size)\n",
        "    batch_nums = tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)\n",
        "    attn_len = tf.shape(_enc_batch_extend_vocab)[1] # number of states we attend over\n",
        "    batch_nums = tf.tile(batch_nums, [1, attn_len]) # shape (batch_size, attn_len)\n",
        "    indices = tf.stack( (batch_nums, _enc_batch_extend_vocab), axis=2) # shape (batch_size, enc_t, 2)\n",
        "    shape = [_hps.batch_size, extended_vsize]\n",
        "    attn_dists_projected = tf.scatter_nd(indices, attn_dist, shape) # list length max_dec_steps (batch_size, extended_vsize)\n",
        "\n",
        "    # Add the vocab distributions and the copy distributions together to get the final distributions\n",
        "    # final_dists is a list length max_dec_steps; each entry is a tensor shape (batch_size, extended_vsize) giving the final distribution for that decoder timestep\n",
        "    # Note that for decoder timesteps and examples corresponding to a [PAD] token, this is junk - ignore.\n",
        "    final_dist = vocab_dists_extended + attn_dists_projected\n",
        "    final_dist +=1e-15 # for cases where we have zero in the final dist, especially for oov words\n",
        "    dist_sums = tf.reduce_sum(final_dist, axis=1)\n",
        "    final_dist = final_dist / tf.reshape(dist_sums, [-1, 1]) # re-normalize\n",
        "    \n",
        "    ### for NANs\n",
        "    #final_dist = add_epsilon(final_dist)\n",
        "    \n",
        "  return final_dist\n",
        "\n",
        "# Note: this function is based on tf.contrib.legacy_seq2seq_attention_decoder, which is now outdated.\n",
        "# In the future, it would make more sense to write variants on the attention mechanism using the new seq2seq library for tensorflow 1.0: https://www.tensorflow.org/api_guides/python/contrib.seq2seq#Attention\n",
        "def attention_decoder(_hps, \n",
        "  v_size, \n",
        "  _max_art_oovs, \n",
        "  _enc_batch_extend_vocab, \n",
        "  emb_dec_inputs,\n",
        "  target_batch,\n",
        "  _dec_in_state, \n",
        "  _enc_states, \n",
        "  enc_padding_mask, \n",
        "  dec_padding_mask, \n",
        "  cell, \n",
        "  embedding, \n",
        "  sampling_probability,\n",
        "  alpha,\n",
        "  unk_id,\n",
        "  initial_state_attention=False,\n",
        "  pointer_gen=True, \n",
        "  use_coverage=False, \n",
        "  prev_coverage=None, \n",
        "  prev_decoder_outputs=[], \n",
        "  prev_encoder_es = []):\n",
        "  \"\"\"\n",
        "  Args:\n",
        "    _hps: parameter of the models.\n",
        "    v_size: vocab size.\n",
        "    _max_art_oovs: size of the oov tokens in current batch.\n",
        "    _enc_batch_extend_vocab: encoder extended vocab batch.\n",
        "    emb_dec_inputs: A list of 2D Tensors [batch_size x emb_dim].\n",
        "    target_batch: The indices of the target words. shape (max_dec_steps, batch_size)\n",
        "    _dec_in_state: 2D Tensor [batch_size x cell.state_size].\n",
        "    _enc_states: 3D Tensor [batch_size x max_enc_steps x attn_size].\n",
        "    enc_padding_mask: 2D Tensor [batch_size x max_enc_steps] containing 1s and 0s; indicates which of the encoder locations are padding (0) or a real token (1).\n",
        "    dec_padding_mask: 2D Tensor [batch_size x max_dec_steps] containing 1s and 0s; indicates which of the decoder locations are padding (0) or a real token (1).\n",
        "    cell: rnn_cell.RNNCell defining the cell function and size.\n",
        "    embedding: embedding matrix [vocab_size, emb_dim].\n",
        "    sampling_probability: sampling probability for scheduled sampling.\n",
        "    alpha: soft-argmax argument.\n",
        "    initial_state_attention:\n",
        "      Note that this attention decoder passes each decoder input through a linear layer with the previous step's context vector to get a modified version of the input. If initial_state_attention is False, on the first decoder step the \"previous context vector\" is just a zero vector. If initial_state_attention is True, we use _dec_in_state to (re)calculate the previous step's context vector. We set this to False for train/eval mode (because we call attention_decoder once for all decoder steps) and True for decode mode (because we call attention_decoder once for each decoder step).\n",
        "    pointer_gen: boolean. If True, calculate the generation probability p_gen for each decoder step.\n",
        "    use_coverage: boolean. If True, use coverage mechanism.\n",
        "    prev_coverage:\n",
        "      If not None, a tensor with shape (batch_size, max_enc_steps). The previous step's coverage vector. This is only not None in decode mode when using coverage.\n",
        "    prev_decoder_outputs: if not empty, a tensor of (len(prev_decoder_steps), batch_size, hidden_dim). The previous decoder output used for calculating the intradecoder attention during decode mode\n",
        "    prev_encoder_es: if not empty, a tensor of (len(prev_encoder_es), batch_size, hidden_dim). The previous attention vector used for calculating the temporal attention during decode mode.\n",
        "  Returns:\n",
        "    outputs: A list of the same length as emb_dec_inputs of 2D Tensors of\n",
        "      shape [batch_size x cell.output_size]. The output vectors.\n",
        "    state: The final state of the decoder. A tensor shape [batch_size x cell.state_size].\n",
        "    attn_dists: A list containing tensors of shape (batch_size,max_enc_steps).\n",
        "      The attention distributions for each decoder step.\n",
        "    p_gens: List of length emb_dim, containing tensors of shape [batch_size, 1]. The values of p_gen for each decoder step. Empty list if pointer_gen=False.\n",
        "    coverage: Coverage vector on the last step computed. None if use_coverage=False.\n",
        "    vocab_scores: vocab distribution.\n",
        "    final_dists: final output distribution.\n",
        "    samples: contains sampled tokens.\n",
        "    greedy_search_samples: contains greedy tokens.\n",
        "    temporal_e: contains temporal attention.\n",
        "  \"\"\"\n",
        "  with variable_scope.variable_scope(\"attention_decoder\") as scope:\n",
        "    batch_size = _enc_states.get_shape()[0] # if this line fails, it's because the batch size isn't defined\n",
        "    attn_size = _enc_states.get_shape()[2] # if this line fails, it's because the attention length isn't defined\n",
        "    emb_size = emb_dec_inputs[0].get_shape()[1] # if this line fails, it's because the embedding isn't defined\n",
        "    decoder_attn_size = _dec_in_state.c.get_shape()[1]\n",
        "    tf.logging.info(\"batch_size %i, attn_size: %i, emb_size: %i\", batch_size, attn_size, emb_size)\n",
        "    # Reshape _enc_states (need to insert a dim)\n",
        "    _enc_states = tf.expand_dims(_enc_states, axis=2) # now is shape (batch_size, max_enc_steps, 1, attn_size)\n",
        "\n",
        "    # To calculate attention, we calculate\n",
        "    #   v^T tanh(W_h h_i + W_s s_t + b_attn)\n",
        "    # where h_i is an encoder state, and s_t a decoder state.\n",
        "    # attn_vec_size is the length of the vectors v, b_attn, (W_h h_i) and (W_s s_t).\n",
        "    # We set it to be equal to the size of the encoder states.\n",
        "    attention_vec_size = attn_size\n",
        "\n",
        "    # Get the weight matrix W_h and apply it to each encoder state to get (W_h h_i), the encoder features\n",
        "    if _hps.matrix_attention:\n",
        "      w_attn = variable_scope.get_variable(\"w_attn\", [attention_vec_size, attention_vec_size])\n",
        "      if _hps.intradecoder:\n",
        "        w_dec_attn = variable_scope.get_variable(\"w_dec_attn\", [decoder_attn_size, decoder_attn_size])\n",
        "    else:\n",
        "      W_h = variable_scope.get_variable(\"W_h\", [1, 1, attn_size, attention_vec_size])\n",
        "      v = variable_scope.get_variable(\"v\", [attention_vec_size])\n",
        "      encoder_features = nn_ops.conv2d(_enc_states, W_h, [1, 1, 1, 1], \"SAME\") # shape (batch_size,max_enc_steps,1,attention_vec_size)\n",
        "    if _hps.intradecoder:\n",
        "      W_h_d = variable_scope.get_variable(\"W_h_d\", [1, 1, decoder_attn_size, decoder_attn_size])\n",
        "      v_d = variable_scope.get_variable(\"v_d\", [decoder_attn_size])\n",
        "\n",
        "    # Get the weight vectors v and w_c (w_c is for coverage)\n",
        "    if use_coverage:\n",
        "      with variable_scope.variable_scope(\"coverage\"):\n",
        "        w_c = variable_scope.get_variable(\"w_c\", [1, 1, 1, attention_vec_size])\n",
        "\n",
        "    if prev_coverage is not None: # for beam search mode with coverage\n",
        "      # reshape from (batch_size, max_enc_steps) to (batch_size, max_enc_steps, 1, 1)\n",
        "      prev_coverage = tf.expand_dims(tf.expand_dims(prev_coverage,2),3)\n",
        "\n",
        "    def attention(decoder_state, temporal_e, coverage=None):\n",
        "      \"\"\"Calculate the context vector and attention distribution from the decoder state.\n",
        "      Args:\n",
        "        decoder_state: state of the decoder\n",
        "        temporal_e: store previous attentions for temporal attention mechanism\n",
        "        coverage: Optional. Previous timestep's coverage vector, shape (batch_size, max_enc_steps, 1, 1).\n",
        "      Returns:\n",
        "        context_vector: weighted sum of _enc_states\n",
        "        attn_dist: attention distribution\n",
        "        coverage: new coverage vector. shape (batch_size, max_enc_steps, 1, 1)\n",
        "        masked_e: store the attention score for temporal attention mechanism.\n",
        "      \"\"\"\n",
        "      with variable_scope.variable_scope(\"Attention\"):\n",
        "        # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)\n",
        "        decoder_features = linear(decoder_state, attention_vec_size, True) # shape (batch_size, attention_vec_size)\n",
        "        decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n",
        "\n",
        "        # We can't have coverage with matrix attention\n",
        "        if not _hps.matrix_attention and use_coverage and coverage is not None: # non-first step of coverage\n",
        "          # Multiply coverage vector by w_c to get coverage_features.\n",
        "          coverage_features = nn_ops.conv2d(coverage, w_c, [1, 1, 1, 1], \"SAME\") # c has shape (batch_size, max_enc_steps, 1, attention_vec_size)\n",
        "          # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)\n",
        "          e_not_masked = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features + coverage_features), [2, 3])  # shape (batch_size,max_enc_steps)\n",
        "          # Zero out attention on padded encoder positions, then re-normalize to sum to 1.\n",
        "          masked_e = nn_ops.softmax(e_not_masked) * enc_padding_mask # (batch_size, max_enc_steps)\n",
        "          masked_sums = tf.reduce_sum(masked_e, axis=1) # shape (batch_size)\n",
        "          masked_e = masked_e / tf.reshape(masked_sums, [-1, 1])\n",
        "          # Temporal attention, Equation 3 in https://arxiv.org/pdf/1705.04304.pdf:\n",
        "          # divide the current scores by the sum of the scores from all previous steps.\n",
        "          if _hps.use_temporal_attention:\n",
        "            try:\n",
        "              len_temporal_e = temporal_e.get_shape()[0]\n",
        "            except:\n",
        "              # NOTE(review): bare except assumes any failure here means there are no\n",
        "              # previous attention scores yet (first decoder step) -- confirm.\n",
        "              len_temporal_e = 0\n",
        "            if len_temporal_e==0:\n",
        "              attn_dist = masked_e\n",
        "            else:\n",
        "              masked_sums = tf.reduce_sum(temporal_e,axis=0)+1e-10 # if it's zero due to masking we set it to a small value\n",
        "              attn_dist = masked_e / masked_sums # (batch_size, max_enc_steps)\n",
        "          else:\n",
        "            attn_dist = masked_e\n",
        "          masked_attn_sums = tf.reduce_sum(attn_dist, axis=1)\n",
        "          attn_dist = attn_dist / tf.reshape(masked_attn_sums, [-1, 1]) # re-normalize\n",
        "          # Update coverage vector\n",
        "          coverage += array_ops.reshape(attn_dist, [batch_size, -1, 1, 1])\n",
        "        else:\n",
        "          if _hps.matrix_attention:\n",
        "            # Calculate h_d * W_attn * h_i, equation 2 in https://arxiv.org/pdf/1705.04304.pdf\n",
        "            _dec_attn = tf.unstack(tf.matmul(tf.squeeze(decoder_features,axis=[1,2]),w_attn),axis=0) # batch_size * (attention_vec_size)\n",
        "            _enc_states_lst = tf.unstack(tf.squeeze(_enc_states,axis=2),axis=0) # batch_size * (max_enc_steps, attention_vec_size)\n",
        "\n",
        "            e_not_masked = tf.squeeze(tf.stack([tf.matmul(tf.reshape(_dec,[1,-1]), tf.transpose(_enc)) for _dec, _enc in zip(_dec_attn,_enc_states_lst)]),axis=1) # (batch_size, max_enc_steps)\n",
        "            # NOTE(review): the mask multiplies the logits before exp, so padded\n",
        "            # positions get weight exp(0)=1 rather than 0 -- verify this is intended.\n",
        "            masked_e = tf.exp(e_not_masked * enc_padding_mask) # (batch_size, max_enc_steps)\n",
        "          else:\n",
        "            # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n",
        "            e_not_masked = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features), [2, 3]) # calculate e, (batch_size, max_enc_steps)\n",
        "            masked_e = nn_ops.softmax(e_not_masked) * enc_padding_mask # (batch_size, max_enc_steps)\n",
        "            masked_sums = tf.reduce_sum(masked_e, axis=1) # shape (batch_size)\n",
        "            masked_e = masked_e / tf.reshape(masked_sums, [-1, 1])\n",
        "          # Temporal attention, Equation 3 in https://arxiv.org/pdf/1705.04304.pdf.\n",
        "          if _hps.use_temporal_attention:\n",
        "            try:\n",
        "              len_temporal_e = temporal_e.get_shape()[0]\n",
        "            except:\n",
        "              # NOTE(review): bare except assumes any failure here means there are no\n",
        "              # previous attention scores yet (first decoder step) -- confirm.\n",
        "              len_temporal_e = 0\n",
        "            if len_temporal_e==0:\n",
        "              attn_dist = masked_e\n",
        "            else:\n",
        "              masked_sums = tf.reduce_sum(temporal_e,axis=0)+1e-10 # if it's zero due to masking we set it to a small value\n",
        "              attn_dist = masked_e / masked_sums # (batch_size, max_enc_steps)\n",
        "          else:\n",
        "            attn_dist = masked_e\n",
        "          # Calculate attention distribution\n",
        "          masked_attn_sums = tf.reduce_sum(attn_dist, axis=1)\n",
        "          attn_dist = attn_dist / tf.reshape(masked_attn_sums, [-1, 1]) # re-normalize\n",
        "\n",
        "          if use_coverage: # first step of training\n",
        "            coverage = tf.expand_dims(tf.expand_dims(attn_dist,2),2) # initialize coverage\n",
        "\n",
        "        # Calculate the context vector from attn_dist and _enc_states\n",
        "        context_vector = math_ops.reduce_sum(array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) * _enc_states, [1, 2]) # shape (batch_size, attn_size).\n",
        "        context_vector = array_ops.reshape(context_vector, [-1, attn_size])\n",
        "\n",
        "      return context_vector, attn_dist, coverage, masked_e\n",
        "\n",
        "    def intra_decoder_attention(decoder_state, outputs):\n",
        "      \"\"\"Calculate the intra-decoder context vector and attention distribution.\n",
        "      Implements the intra-decoder attention mechanism of\n",
        "      https://arxiv.org/pdf/1705.04304.pdf over the previous decoder states.\n",
        "      Args:\n",
        "        decoder_state: state of the decoder\n",
        "        outputs: list of decoder states for implementing intra-decoder mechanism, len(decoder_states) * (batch_size, hidden_dim)\n",
        "      Returns:\n",
        "        context_decoder_vector: weighted sum of _dec_states\n",
        "        decoder_attn_dist: intra-decoder attention distribution\n",
        "      \"\"\"\n",
        "      attention_dec_vec_size = attn_dec_size = decoder_state.c.get_shape()[1] # hidden_dim\n",
        "      try:\n",
        "        len_dec_states = outputs.get_shape()[0]\n",
        "      except:\n",
        "        # NOTE(review): bare except assumes any failure here means there are no\n",
        "        # previous decoder states yet (first decoding step) -- confirm.\n",
        "        len_dec_states = 0\n",
        "      _decoder_states = tf.expand_dims(tf.reshape(outputs,[batch_size,-1,attn_dec_size]), axis=2) # now is shape (batch_size,len(decoder_states), 1, attn_size)\n",
        "      _prev_decoder_features = nn_ops.conv2d(_decoder_states, W_h_d, [1, 1, 1, 1], \"SAME\") # shape (batch_size,len(decoder_states),1,attention_vec_size)\n",
        "      with variable_scope.variable_scope(\"DecoderAttention\"):\n",
        "        # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)\n",
        "        try:\n",
        "          decoder_features = linear(decoder_state, attention_dec_vec_size, True) # shape (batch_size, attention_vec_size)\n",
        "          decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_dec_vec_size)\n",
        "          # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n",
        "          if _hps.matrix_attention:\n",
        "            # Calculate h_d * W_attn * h_d, equation 6 in https://arxiv.org/pdf/1705.04304.pdf\n",
        "            _dec_attn = tf.matmul(tf.squeeze(decoder_features),w_dec_attn) # (batch_size, decoder_attn_size)\n",
        "            _dec_states_lst = tf.unstack(tf.reshape(_prev_decoder_features,[batch_size,-1,decoder_attn_size])) # batch_size * (len(decoder_states), decoder_attn_size)\n",
        "            e_not_masked = tf.reshape(tf.stack([tf.matmul(_dec_attn, tf.transpose(k)) for k in _dec_states_lst]),[batch_size,-1]) # (batch_size, len(decoder_states))\n",
        "            masked_e = tf.exp(e_not_masked * dec_padding_mask[:,:len_dec_states]) # (batch_size, len(decoder_states))\n",
        "          else:\n",
        "            # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n",
        "            e_not_masked = math_ops.reduce_sum(v_d * math_ops.tanh(_prev_decoder_features + decoder_features), [2, 3]) # calculate e, (batch_size,len(decoder_states))\n",
        "            masked_e = nn_ops.softmax(e_not_masked) * dec_padding_mask[:,:len_dec_states] # (batch_size,len(decoder_states))\n",
        "          if len_dec_states <= 1:\n",
        "            masked_e = array_ops.ones([batch_size,1]) # first step is filled with equal values\n",
        "          masked_sums = tf.reshape(tf.reduce_sum(masked_e,axis=1),[-1,1]) # (batch_size,1), # if it's zero due to masking we set it to a small value\n",
        "          decoder_attn_dist = masked_e / masked_sums # (batch_size,len(decoder_states))\n",
        "          context_decoder_vector = math_ops.reduce_sum(array_ops.reshape(decoder_attn_dist, [batch_size, -1, 1, 1]) * _decoder_states, [1, 2]) # (batch_size, attn_size)\n",
        "          context_decoder_vector = array_ops.reshape(context_decoder_vector, [-1, attn_dec_size]) # (batch_size, attn_size)\n",
        "        except:\n",
        "          # NOTE(review): broad except falls back to zero context/attention when the\n",
        "          # ops above cannot be built (e.g. no decoder states yet) -- confirm.\n",
        "          return array_ops.zeros([batch_size, decoder_attn_size]), array_ops.zeros([batch_size, 0])\n",
        "      return context_decoder_vector, decoder_attn_dist\n",
        "\n",
        "    outputs = []\n",
        "    temporal_e = []\n",
        "    attn_dists = []\n",
        "    vocab_scores = []\n",
        "    vocab_dists = []\n",
        "    final_dists = []\n",
        "    p_gens = []\n",
        "    samples = [] # this holds the words chosen by sampling based on the final distribution for each decoding step, list of max_dec_steps of (batch_size, 1)\n",
        "    greedy_search_samples = [] # this holds the words chosen by greedy search (taking the max) on the final distribution for each decoding step, list of max_dec_steps of (batch_size, 1)\n",
        "    sampling_rewards = [] # list of size max_dec_steps (batch_size, k)\n",
        "    greedy_rewards = [] # list of size max_dec_steps (batch_size, k)\n",
        "    state = _dec_in_state\n",
        "    coverage = prev_coverage # initialize coverage to None or whatever was passed in\n",
        "    context_vector = array_ops.zeros([batch_size, attn_size])\n",
        "    context_decoder_vector = array_ops.zeros([batch_size, decoder_attn_size])\n",
        "    context_vector.set_shape([None, attn_size])  # Ensure the second shape of attention vectors is set.\n",
        "    if initial_state_attention: # true in decode mode\n",
        "      # Re-calculate the context vector from the previous step so that we can pass it through a linear layer with this step's input to get a modified version of the input\n",
        "      context_vector, _, coverage, _ = attention(_dec_in_state, tf.stack(prev_encoder_es,axis=0), coverage) # in decode mode, this is what updates the coverage vector\n",
        "      if _hps.intradecoder:\n",
        "        context_decoder_vector, _ = intra_decoder_attention(_dec_in_state, tf.stack(prev_decoder_outputs,axis=0))\n",
        "    for i, inp in enumerate(emb_dec_inputs):\n",
        "      tf.logging.info(\"Adding attention_decoder timestep %i of %i\", i, len(emb_dec_inputs))\n",
        "      \n",
        "      \n",
        "      if i > 0:\n",
        "        variable_scope.get_variable_scope().reuse_variables()\n",
        "\n",
        "      if _hps.mode in ['train','eval'] and _hps.scheduled_sampling and i > 0: # start scheduled sampling after we received the first decoder's output\n",
        "        # modify the input to next decoder using scheduled sampling\n",
        "        if FLAGS.scheduled_sampling_final_dist:\n",
        "          inp = scheduled_sampling(_hps, sampling_probability, final_dist, embedding, inp, alpha)\n",
        "        else:\n",
        "          inp = scheduled_sampling_vocab_dist(_hps, sampling_probability, vocab_dist, embedding, inp, alpha)\n",
        "\n",
        "      # Merge input and previous attentions into one vector x of the same size as inp\n",
        "      emb_dim = inp.get_shape().with_rank(2)[1]\n",
        "      if emb_dim is None:\n",
        "        raise ValueError(\"Could not infer input size from input: %s\" % inp.name)\n",
        "\n",
        "      x = linear([inp] + [context_vector], emb_dim, True)\n",
        "      # Run the decoder RNN cell. cell_output = decoder state\n",
        "      cell_output, state = cell(x, state)\n",
        "\n",
        "      # Run the attention mechanism.\n",
        "      if i == 0 and initial_state_attention:  # always true in decode mode\n",
        "        with variable_scope.variable_scope(variable_scope.get_variable_scope()):#, reuse=True): # you need this because you've already run the initial attention(...) call\n",
        "          context_vector, attn_dist, _, masked_e = attention(state, tf.stack(prev_encoder_es,axis=0), coverage) # don't allow coverage to update\n",
        "          if _hps.intradecoder:\n",
        "            context_decoder_vector, _ = intra_decoder_attention(state, tf.stack(prev_decoder_outputs,axis=0))\n",
        "      else:\n",
        "        context_vector, attn_dist, coverage, masked_e = attention(state, tf.stack(temporal_e,axis=0), coverage)\n",
        "        if _hps.intradecoder:\n",
        "          context_decoder_vector, _ = intra_decoder_attention(state, tf.stack(outputs,axis=0))\n",
        "      attn_dists.append(attn_dist)\n",
        "      temporal_e.append(masked_e)\n",
        "\n",
        "      with variable_scope.variable_scope(\"combined_context\"):\n",
        "        if _hps.intradecoder:\n",
        "          context_vector = linear([context_vector] + [context_decoder_vector], attn_size, False)\n",
        "      # Calculate p_gen\n",
        "      if pointer_gen:\n",
        "        with tf.variable_scope('calculate_pgen'):\n",
        "          p_gen = linear([context_vector, state.c, state.h, x], 1, True) # Tensor shape (batch_size, 1)\n",
        "          p_gen = tf.sigmoid(p_gen)\n",
        "          p_gens.append(p_gen)\n",
        "\n",
        "      # Concatenate the cell_output (= decoder state) and the context vector, and pass them through a linear layer\n",
        "      # This is V[s_t, h*_t] + b in the paper\n",
        "      with variable_scope.variable_scope(\"AttnOutputProjection\"):\n",
        "        output = linear([cell_output] + [context_vector], cell.output_size, True)\n",
        "      outputs.append(output)\n",
        "\n",
        "      # Add the output projection to obtain the vocabulary distribution\n",
        "      with tf.variable_scope('output_projection'):\n",
        "        if i > 0:\n",
        "          tf.get_variable_scope().reuse_variables()\n",
        "        trunc_norm_init = tf.truncated_normal_initializer(stddev=_hps.trunc_norm_init_std)\n",
        "        w_out = tf.get_variable('w', [_hps.dec_hidden_dim, v_size], dtype=tf.float32, initializer=trunc_norm_init)\n",
        "        #w_t_out = tf.transpose(w)\n",
        "        v_out = tf.get_variable('v', [v_size], dtype=tf.float32, initializer=trunc_norm_init)\n",
        "        if i > 0:\n",
        "          tf.get_variable_scope().reuse_variables()\n",
        "        if FLAGS.share_decoder_weights: # Eq. 13 in https://arxiv.org/pdf/1705.04304.pdf\n",
        "          w_out = tf.transpose(\n",
        "            math_ops.tanh(linear([embedding] + [tf.transpose(w_out)], _hps.dec_hidden_dim, bias=False)))\n",
        "        score = tf.nn.xw_plus_b(output, w_out, v_out)\n",
        "        if _hps.scheduled_sampling and not _hps.greedy_scheduled_sampling:\n",
        "          # Gumbel reparametrization trick: https://arxiv.org/abs/1704.06970\n",
        "          U = tf.random_uniform(score.get_shape(),10e-12,(1-10e-12)) # add a small number to avoid log(0)\n",
        "          G = -tf.log(-tf.log(U))\n",
        "          score = score + G\n",
        "        vocab_scores.append(score) # apply the linear layer\n",
        "        vocab_dist = tf.nn.softmax(score)\n",
        "        vocab_dists.append(vocab_dist) # The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays. The words are in the order they appear in the vocabulary file.\n",
        "\n",
        "      # For pointer-generator model, calc final distribution from copy distribution and vocabulary distribution\n",
        "      if _hps.pointer_gen:\n",
        "        final_dist = _calc_final_dist(_hps, v_size, _max_art_oovs, _enc_batch_extend_vocab, p_gen, vocab_dist,\n",
        "                                      attn_dist)\n",
        "      else: # final distribution is just vocabulary distribution\n",
        "        final_dist = vocab_dist\n",
        "      final_dists.append(final_dist)\n",
        "\n",
        "      # get the sampled token and greedy token\n",
        "      # this will take the final_dist and sample from it for a total count of k (k samples)\n",
        "      one_hot_k_samples = tf.distributions.Multinomial(total_count=1., probs=final_dist).sample(\n",
        "        _hps.k)  # sample k times according to https://arxiv.org/pdf/1705.04304.pdf, size (k, batch_size, extended_vsize)\n",
        "      k_argmax = tf.argmax(one_hot_k_samples, axis=2, output_type=tf.int32) # (k, batch_size)\n",
        "      k_sample = tf.transpose(k_argmax) # shape (batch_size, k)\n",
        "      greedy_search_prob, greedy_search_sample = tf.nn.top_k(final_dist, k=_hps.k) # (batch_size, k)\n",
        "      greedy_search_samples.append(greedy_search_sample)\n",
        "      samples.append(k_sample)\n",
        "      if FLAGS.use_discounted_rewards:\n",
        "        _sampling_rewards = []\n",
        "        _greedy_rewards = []\n",
        "        for _ in range(_hps.k):\n",
        "          rl_fscore = tf.reshape(rouge_l_fscore(tf.transpose(tf.stack(samples)[:, :, _]), target_batch),\n",
        "                                 [-1, 1])  # shape (batch_size, 1)\n",
        "          _sampling_rewards.append(tf.reshape(rl_fscore, [-1, 1]))\n",
        "          rl_fscore = tf.reshape(rouge_l_fscore(tf.transpose(tf.stack(greedy_search_samples)[:, :, _]), target_batch),\n",
        "                                 [-1, 1])  # shape (batch_size, 1)\n",
        "          _greedy_rewards.append(tf.reshape(rl_fscore, [-1, 1]))\n",
        "        sampling_rewards.append(tf.squeeze(tf.stack(_sampling_rewards, axis=1), axis = -1)) # (batch_size, k)\n",
        "        greedy_rewards.append(tf.squeeze(tf.stack(_greedy_rewards, axis=1), axis = -1))  # (batch_size, k)\n",
        "\n",
        "    if FLAGS.use_discounted_rewards:\n",
        "      sampling_rewards = tf.stack(sampling_rewards)\n",
        "      greedy_rewards = tf.stack(greedy_rewards)\n",
        "    else:\n",
        "      _sampling_rewards = []\n",
        "      _greedy_rewards = []\n",
        "      for _ in range(_hps.k):\n",
        "        rl_fscore = rouge_l_fscore(tf.transpose(tf.stack(samples)[:, :, _]), target_batch) # shape (batch_size, 1)\n",
        "        _sampling_rewards.append(tf.reshape(rl_fscore, [-1, 1]))\n",
        "        rl_fscore = rouge_l_fscore(tf.transpose(tf.stack(greedy_search_samples)[:, :, _]), target_batch)  # shape (batch_size, 1)\n",
        "        _greedy_rewards.append(tf.reshape(rl_fscore, [-1, 1]))\n",
        "      sampling_rewards = tf.squeeze(tf.stack(_sampling_rewards, axis=1), axis=-1) # (batch_size, k)\n",
        "      greedy_rewards = tf.squeeze(tf.stack(_greedy_rewards, axis=1), axis=-1) # (batch_size, k)\n",
        "    # If using coverage, reshape it\n",
        "    if coverage is not None:\n",
        "      coverage = array_ops.reshape(coverage, [batch_size, -1])\n",
        "\n",
        "  return (\n",
        "  outputs, state, attn_dists, p_gens, coverage, vocab_scores, final_dists, samples, greedy_search_samples, temporal_e,\n",
        "  sampling_rewards, greedy_rewards)\n",
        "\n",
        "def scheduled_sampling(hps, sampling_probability, output, embedding, inp, alpha = 0):\n",
        "  \"\"\"Scheduled sampling over the final (extended-vocabulary) distribution.\n",
        "  For each example in the batch, with probability sampling_probability, replace\n",
        "  the ground-truth input embedding for the next decoder step with an embedding\n",
        "  derived from the model output (sampled, greedy argmax, weighted top-k mixture,\n",
        "  or soft argmax, depending on hps).\n",
        "  Args:\n",
        "    hps: hyperparameters (uses batch_size, greedy_scheduled_sampling, E2EBackProp, hard_argmax, k)\n",
        "    sampling_probability: probability of feeding back a model-generated token instead of ground truth\n",
        "    output: distribution whose columns beyond the embedding vocabulary hold OOV\n",
        "      scores (assumed (batch_size, extended_vsize) -- TODO confirm against caller)\n",
        "    embedding: embedding matrix; first dimension is the vocabulary size\n",
        "    inp: ground-truth embedded input for this step (presumably (batch_size, emb_dim) -- verify against caller)\n",
        "    alpha: temperature for the soft argmax / soft top-k relaxations\n",
        "  Returns:\n",
        "    A tensor shaped like inp mixing ground-truth rows and model-fed rows.\n",
        "  \"\"\"\n",
        "  # borrowed ideas from https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/ScheduledEmbeddingTrainingHelper\n",
        "  vocab_size = embedding.get_shape()[0]\n",
        "\n",
        "  def soft_argmax(alpha, _output):\n",
        "    # Differentiable approximation of argmax: fold all OOV scores into the UNK\n",
        "    # score (index 0), renormalize, then sharpen with a temperature softmax.\n",
        "    new_oov_scores = tf.reshape(_output[:, 0] + tf.reduce_sum(_output[:, vocab_size:], axis=1),\n",
        "                                [-1, 1])  # add score for all OOV to the UNK score\n",
        "    _output = tf.concat([new_oov_scores, _output[:, 1:vocab_size]], axis=1) # select only the vocab_size outputs\n",
        "    _output = _output / tf.reshape(tf.reduce_sum(output, axis=1), [-1, 1]) # re-normalize scores\n",
        "\n",
        "    #alpha_exp = tf.exp(alpha * _output) # (batch_size, vocab_size)\n",
        "    #one_hot_scores = alpha_exp / tf.reshape(tf.reduce_sum(alpha_exp, axis=1),[-1,1]) #(batch_size, vocab_size)\n",
        "    one_hot_scores = tf.nn.softmax((alpha * _output))\n",
        "    return one_hot_scores\n",
        "\n",
        "  def soft_top_k(alpha, _output, K):\n",
        "    # Differentiable top-K: repeatedly take a soft argmax and suppress the mass\n",
        "    # it selected, K times.\n",
        "    copy = tf.identity(_output)\n",
        "    p = []\n",
        "    arg_top_k = []\n",
        "    for k in range(K):\n",
        "      sargmax = soft_argmax(alpha, copy)\n",
        "      copy = (1-sargmax)* copy\n",
        "      p.append(tf.reduce_sum(sargmax * _output, axis=1))\n",
        "      arg_top_k.append(sargmax)\n",
        "\n",
        "    return tf.stack(p, axis=1), tf.stack(arg_top_k)\n",
        "\n",
        "  with variable_scope.variable_scope(\"ScheduledEmbedding\"):\n",
        "    # Return -1s where we did not sample, and sample_ids elsewhere\n",
        "    select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)\n",
        "    select_sample = select_sampler.sample(sample_shape=hps.batch_size)\n",
        "    sample_id_sampler = categorical.Categorical(probs=output) # equals to argmax{ Multinomial(output, total_count=1) }, our greedy search selection\n",
        "    sample_ids = array_ops.where(\n",
        "            select_sample,\n",
        "            sample_id_sampler.sample(seed=123),\n",
        "            gen_array_ops.fill([hps.batch_size], -1))\n",
        "\n",
        "    # Batch indices of the examples that use a model-fed input vs. ground truth.\n",
        "    where_sampling = math_ops.cast(\n",
        "        array_ops.where(sample_ids > -1), tf.int32)\n",
        "    where_not_sampling = math_ops.cast(\n",
        "        array_ops.where(sample_ids <= -1), tf.int32)\n",
        "\n",
        "    if hps.greedy_scheduled_sampling:\n",
        "      # NOTE(review): sample_ids is overwritten with the argmax AFTER the\n",
        "      # where_sampling/where_not_sampling splits were computed from the\n",
        "      # stochastic ids above -- confirm this ordering is intended.\n",
        "      sample_ids = tf.argmax(output, axis=1, output_type=tf.int32)\n",
        "\n",
        "    sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)\n",
        "\n",
        "    cond = tf.less(sample_ids_sampling, vocab_size) # replace oov with unk\n",
        "    sample_ids_sampling = tf.cast(cond, tf.int32) * sample_ids_sampling # oov ids become 0 (presumably UNK)\n",
        "    inputs_not_sampling = array_ops.gather_nd(inp, where_not_sampling)\n",
        "\n",
        "    if hps.E2EBackProp:\n",
        "      # End-to-end backprop: feed back a probability-weighted mixture of the\n",
        "      # top-k embeddings instead of a single token embedding.\n",
        "      if hps.hard_argmax:\n",
        "        greedy_search_prob, greedy_search_sample = tf.nn.top_k(output, k=hps.k) # (batch_size, k)\n",
        "        greedy_search_prob_normalized = greedy_search_prob/tf.reshape(tf.reduce_sum(greedy_search_prob,axis=1),[-1,1])\n",
        "\n",
        "        cond = tf.less(greedy_search_sample, vocab_size) # replace oov with unk\n",
        "        greedy_search_sample = tf.cast(cond, tf.int32) * greedy_search_sample\n",
        "\n",
        "        greedy_embedding = tf.nn.embedding_lookup(embedding, greedy_search_sample)\n",
        "        normalized_embedding = tf.multiply(tf.reshape(greedy_search_prob_normalized,[hps.batch_size,hps.k,1]), greedy_embedding)\n",
        "        e2e_embedding = tf.reduce_mean(normalized_embedding,axis=1)\n",
        "      else:\n",
        "        e = []\n",
        "        greedy_search_prob, greedy_search_sample = soft_top_k(alpha, output,\n",
        "                                                              K=hps.k)  # (batch_size, k), (k, batch_size, vocab_size)\n",
        "        greedy_search_prob_normalized = greedy_search_prob / tf.reshape(tf.reduce_sum(greedy_search_prob, axis=1),\n",
        "                                                                        [-1, 1])\n",
        "\n",
        "        for _ in range(hps.k):\n",
        "          a_k = greedy_search_sample[_]\n",
        "          e_k = tf.matmul(tf.reshape(greedy_search_prob_normalized[:,_],[-1,1]) * a_k, embedding)\n",
        "          e.append(e_k)\n",
        "        e2e_embedding = tf.reduce_sum(e, axis=0) # (batch_size, emb_dim)\n",
        "      sampled_next_inputs = array_ops.gather_nd(e2e_embedding, where_sampling)\n",
        "    else:\n",
        "      if hps.hard_argmax:\n",
        "        sampled_next_inputs = tf.nn.embedding_lookup(embedding, sample_ids_sampling)\n",
        "      else: # using soft argmax (greedy) proposed in: https://arxiv.org/abs/1704.06970\n",
        "        #alpha_exp = tf.exp(alpha * (output_not_extended + G)) # (batch_size, vocab_size)\n",
        "        #one_hot_scores = alpha_exp / tf.reduce_sum(alpha_exp, axis=1) #(batch_size, vocab_size)\n",
        "        one_hot_scores = soft_argmax(alpha, output) #(batch_size, vocab_size)\n",
        "        soft_argmax_embedding = tf.matmul(one_hot_scores, embedding) #(batch_size, emb_size)\n",
        "        sampled_next_inputs = array_ops.gather_nd(soft_argmax_embedding, where_sampling)\n",
        "\n",
        "    # Scatter sampled rows and ground-truth rows back into a full batch tensor.\n",
        "    base_shape = array_ops.shape(inp)\n",
        "    result1 = array_ops.scatter_nd(indices=where_sampling, updates=sampled_next_inputs, shape=base_shape)\n",
        "    result2 = array_ops.scatter_nd(indices=where_not_sampling, updates=inputs_not_sampling, shape=base_shape)\n",
        "    return result1 + result2\n",
        "\n",
        "def scheduled_sampling_vocab_dist(hps, sampling_probability, output, embedding, inp, alpha = 0):\n",
        "  \"\"\"Scheduled sampling over the plain vocabulary distribution.\n",
        "  Same mechanism as scheduled_sampling, but operates on the vocabulary\n",
        "  distribution directly; unlike scheduled_sampling, no OOV-to-UNK id\n",
        "  replacement is performed here.\n",
        "  Args:\n",
        "    hps: hyperparameters (uses batch_size, greedy_scheduled_sampling, E2EBackProp, hard_argmax, k)\n",
        "    sampling_probability: probability of feeding back a model-generated token instead of ground truth\n",
        "    output: vocabulary distribution (assumed (batch_size, vocab_size) -- TODO confirm against caller)\n",
        "    embedding: embedding matrix\n",
        "    inp: ground-truth embedded input for this step (presumably (batch_size, emb_dim) -- verify against caller)\n",
        "    alpha: temperature for the soft argmax / soft top-k relaxations\n",
        "  Returns:\n",
        "    A tensor shaped like inp mixing ground-truth rows and model-fed rows.\n",
        "  \"\"\"\n",
        "  # borrowed ideas from https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/ScheduledEmbeddingTrainingHelper\n",
        "\n",
        "  def soft_argmax(alpha, output):\n",
        "    # Differentiable approximation of argmax via a temperature softmax.\n",
        "    #alpha_exp = tf.exp(alpha * output) # (batch_size, vocab_size)\n",
        "    #one_hot_scores = alpha_exp / tf.reshape(tf.reduce_sum(alpha_exp, axis=1),[-1,1]) #(batch_size, vocab_size)\n",
        "    one_hot_scores = tf.nn.softmax(alpha * output)\n",
        "    return one_hot_scores\n",
        "\n",
        "  def soft_top_k(alpha, output, K):\n",
        "    # Differentiable top-K: repeatedly take a soft argmax and suppress the mass\n",
        "    # it selected, K times.\n",
        "    copy = tf.identity(output)\n",
        "    p = []\n",
        "    arg_top_k = []\n",
        "    for k in range(K):\n",
        "      sargmax = soft_argmax(alpha, copy)\n",
        "      copy = (1-sargmax)* copy\n",
        "      p.append(tf.reduce_sum(sargmax * output, axis=1))\n",
        "      arg_top_k.append(sargmax)\n",
        "\n",
        "    return tf.stack(p, axis=1), tf.stack(arg_top_k)\n",
        "\n",
        "  with variable_scope.variable_scope(\"ScheduledEmbedding\"):\n",
        "    # Return -1s where we did not sample, and sample_ids elsewhere\n",
        "    select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)\n",
        "    select_sample = select_sampler.sample(sample_shape=hps.batch_size)\n",
        "    sample_id_sampler = categorical.Categorical(probs=output) # equals to argmax{ Multinomial(output, total_count=1) }, our greedy search selection\n",
        "    sample_ids = array_ops.where(\n",
        "            select_sample,\n",
        "            sample_id_sampler.sample(seed=123),\n",
        "            gen_array_ops.fill([hps.batch_size], -1))\n",
        "\n",
        "    # Batch indices of the examples that use a model-fed input vs. ground truth.\n",
        "    where_sampling = math_ops.cast(\n",
        "        array_ops.where(sample_ids > -1), tf.int32)\n",
        "    where_not_sampling = math_ops.cast(\n",
        "        array_ops.where(sample_ids <= -1), tf.int32)\n",
        "\n",
        "    if hps.greedy_scheduled_sampling:\n",
        "      # NOTE(review): sample_ids is overwritten with the argmax AFTER the\n",
        "      # where_sampling/where_not_sampling splits were computed from the\n",
        "      # stochastic ids above -- confirm this ordering is intended.\n",
        "      sample_ids = tf.argmax(output, axis=1, output_type=tf.int32)\n",
        "\n",
        "    sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)\n",
        "    inputs_not_sampling = array_ops.gather_nd(inp, where_not_sampling)\n",
        "\n",
        "    if hps.E2EBackProp:\n",
        "      # End-to-end backprop: feed back a probability-weighted mixture of the\n",
        "      # top-k embeddings instead of a single token embedding.\n",
        "      if hps.hard_argmax:\n",
        "        greedy_search_prob, greedy_search_sample = tf.nn.top_k(output, k=hps.k) # (batch_size, k)\n",
        "        greedy_search_prob_normalized = greedy_search_prob/tf.reshape(tf.reduce_sum(greedy_search_prob,axis=1),[-1,1])\n",
        "        greedy_embedding = tf.nn.embedding_lookup(embedding, greedy_search_sample)\n",
        "        normalized_embedding = tf.multiply(tf.reshape(greedy_search_prob_normalized,[hps.batch_size,hps.k,1]), greedy_embedding)\n",
        "        e2e_embedding = tf.reduce_mean(normalized_embedding,axis=1)\n",
        "      else:\n",
        "        e = []\n",
        "        greedy_search_prob, greedy_search_sample = soft_top_k(alpha, output,\n",
        "                                                              K=hps.k)  # (batch_size, k), (k, batch_size, vocab_size)\n",
        "        greedy_search_prob_normalized = greedy_search_prob / tf.reshape(tf.reduce_sum(greedy_search_prob, axis=1),\n",
        "                                                                        [-1, 1])\n",
        "\n",
        "        for _ in range(hps.k):\n",
        "          a_k = greedy_search_sample[_]\n",
        "          e_k = tf.matmul(tf.reshape(greedy_search_prob_normalized[:,_],[-1,1]) * a_k, embedding)\n",
        "          e.append(e_k)\n",
        "        e2e_embedding = tf.reduce_sum(e, axis=0) # (batch_size, emb_dim)\n",
        "      sampled_next_inputs = array_ops.gather_nd(e2e_embedding, where_sampling)\n",
        "    else:\n",
        "      if hps.hard_argmax:\n",
        "        sampled_next_inputs = tf.nn.embedding_lookup(embedding, sample_ids_sampling)\n",
        "      else: # using soft argmax (greedy) proposed in: https://arxiv.org/abs/1704.06970\n",
        "        #alpha_exp = tf.exp(alpha * (output_not_extended + G)) # (batch_size, vocab_size)\n",
        "        #one_hot_scores = alpha_exp / tf.reduce_sum(alpha_exp, axis=1) #(batch_size, vocab_size)\n",
        "        one_hot_scores = soft_argmax(alpha, output) #(batch_size, vocab_size)\n",
        "        soft_argmax_embedding = tf.matmul(one_hot_scores, embedding) #(batch_size, emb_size)\n",
        "        sampled_next_inputs = array_ops.gather_nd(soft_argmax_embedding, where_sampling)\n",
        "\n",
        "    # Scatter sampled rows and ground-truth rows back into a full batch tensor.\n",
        "    base_shape = array_ops.shape(inp)\n",
        "    result1 = array_ops.scatter_nd(indices=where_sampling, updates=sampled_next_inputs, shape=base_shape)\n",
        "    result2 = array_ops.scatter_nd(indices=where_not_sampling, updates=inputs_not_sampling, shape=base_shape)\n",
        "    return result1 + result2\n",
        "\n",
        "def linear(args, output_size, bias, bias_start=0.0, scope=None):\n",
        "  \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.\n",
        "  Args:\n",
        "    args: a 2D Tensor or a list of 2D, batch x n, Tensors.\n",
        "    output_size: int, second dimension of W[i].\n",
        "    bias: boolean, whether to add a bias term or not.\n",
        "    bias_start: starting value to initialize the bias; 0 by default.\n",
        "    scope: VariableScope for the created subgraph; defaults to \"Linear\".\n",
        "  Returns:\n",
        "    A 2D Tensor with shape [batch x output_size] equal to\n",
        "    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.\n",
        "  Raises:\n",
        "    ValueError: if some of the arguments has unspecified or wrong shape.\n",
        "  \"\"\"\n",
        "  if args is None or (isinstance(args, (list, tuple)) and not args):\n",
        "    raise ValueError(\"`args` must be specified\")\n",
        "  if not isinstance(args, (list, tuple)):\n",
        "    args = [args]\n",
        "  # Calculate the total size of arguments on dimension 1.\n",
        "  total_arg_size = 0\n",
        "  shapes = [a.get_shape().as_list() for a in args]\n",
        "  for shape in shapes:\n",
        "    if len(shape) != 2:\n",
        "      raise ValueError(\"Linear is expecting 2D arguments: %s\" % str(shapes))\n",
        "    if not shape[1]:\n",
        "      raise ValueError(\"Linear expects shape[1] of arguments: %s\" % str(shapes))\n",
        "    else:\n",
        "      total_arg_size += shape[1]\n",
        "\n",
        "  # Now the computation.\n",
        "  # AUTO_REUSE lets repeated calls (e.g. once per decoder timestep) share the\n",
        "  # same Matrix/Bias variables instead of raising a variable-reuse error.\n",
        "  with tf.variable_scope(scope or \"Linear\" , reuse=tf.AUTO_REUSE):\n",
        "    matrix = tf.get_variable(\"Matrix\", [total_arg_size, output_size])\n",
        "    if len(args) == 1:\n",
        "      res = tf.matmul(args[0], matrix)\n",
        "    else:\n",
        "      # Concatenate inputs along dim 1 so one matmul covers all of them.\n",
        "      res = tf.matmul(tf.concat(axis=1, values=args), matrix)\n",
        "    if not bias:\n",
        "      return res\n",
        "    bias_term = tf.get_variable(\n",
        "        \"Bias\", [output_size], initializer=tf.constant_initializer(bias_start))\n",
        "  return res + bias_term"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "y4EKS0e5IsEB",
        "colab_type": "text"
      },
      "source": [
        "### Model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HZxgFOulIu3b",
        "colab_type": "text"
      },
      "source": [
        "https://github.com/yaserkl/RLSeq2Seq/blob/master/src/model.py"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wOpxpsHjIwaE",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This file contains code to build and run the tensorflow graph for the sequence-to-sequence model\"\"\"\n",
        "\n",
        "import os\n",
        "import time\n",
        "import numpy as np\n",
        "import tensorflow as tf\n",
        "#from attention_decoder import attention_decoder\n",
        "##from tensorflow.contrib.tensorboard.plugins import projector\n",
        "from nltk.translate.bleu_score import sentence_bleu\n",
        "#from rouge import rouge\n",
        "#from rouge_tensor import rouge_l_fscore\n",
        "#import data\n",
        "#from replay_buffer import Transition\n",
        "\n",
        "FLAGS = tf.app.flags.FLAGS\n",
        "\n",
        "class SummarizationModel(object):\n",
        "  \"\"\"A class to represent a sequence-to-sequence model for text summarization. Supports both baseline mode, pointer-generator mode, and coverage\"\"\"\n",
        "\n",
        "  def __init__(self, hps, vocab):\n",
        "    \"\"\"Store hyperparameters and vocabulary on the instance.\n",
        "    Args:\n",
        "      hps: hyperparameters object (read as attributes, e.g. hps.batch_size).\n",
        "      vocab: vocabulary object providing size() and word2id().\n",
        "    \"\"\"\n",
        "    self._hps = hps\n",
        "    self._vocab = vocab\n",
        "\n",
        "  def reward_function(self, reference, summary, measure='rouge_l/f_score'):\n",
        "    \"\"\"Calculate the reward between the reference and summary.\n",
        "    Args:\n",
        "      reference: A list of ids representing the ground-truth data\n",
        "      summary: A list of ids representing the model generated data\n",
        "    Returns:\n",
        "      A single value representing the evaluation value for reference and summary\n",
        "    \"\"\"\n",
        "    if 'rouge' in measure:\n",
        "      # NOTE(review): the `from rouge import rouge` import is commented out in\n",
        "      # this cell -- confirm rouge() is defined by an earlier cell, otherwise\n",
        "      # this branch (taken by the default measure) raises NameError.\n",
        "      return rouge([summary],[reference])[measure]\n",
        "    else:\n",
        "      # BLEU-4 with uniform n-gram weights; inputs are whitespace-tokenized here.\n",
        "      return sentence_bleu([reference.split()],summary.split(),weights=(0.25,0.25,0.25,0.25))\n",
        "\n",
        "  def variable_summaries(self, var_name, var):\n",
        "    \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n",
        "    # Records mean / stddev / max / min scalars plus a histogram, grouped under\n",
        "    # a name scope keyed by var_name so entries stay distinct in TensorBoard.\n",
        "    with tf.name_scope('summaries_{}'.format(var_name)):\n",
        "      mean = tf.reduce_mean(var)\n",
        "      tf.summary.scalar('mean', mean)\n",
        "      with tf.name_scope('stddev'):\n",
        "        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n",
        "      tf.summary.scalar('stddev', stddev)\n",
        "      tf.summary.scalar('max', tf.reduce_max(var))\n",
        "      tf.summary.scalar('min', tf.reduce_min(var))\n",
        "      tf.summary.histogram('histogram', var)\n",
        "\n",
        "  def _add_placeholders(self):\n",
        "    \"\"\"Add placeholders to the graph. These are entry points for any input data.\"\"\"\n",
        "    hps = self._hps\n",
        "\n",
        "    # encoder part\n",
        "    self._enc_batch = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch')\n",
        "    self._enc_lens = tf.placeholder(tf.int32, [hps.batch_size], name='enc_lens')\n",
        "    self._enc_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, None], name='enc_padding_mask')\n",
        "    # eta: scalar weight mixing the RL loss with the CE loss (used in _add_shared_loss_op).\n",
        "    self._eta = tf.placeholder(tf.float32, None, name='eta')\n",
        "    if FLAGS.embedding != \"\":\n",
        "      # Entry point for a pretrained embedding matrix of shape (vocab_size, emb_dim).\n",
        "      self.embedding_place = tf.placeholder(tf.float32, [self._vocab.size(), hps.emb_dim])\n",
        "    if FLAGS.pointer_gen:\n",
        "      self._enc_batch_extend_vocab = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch_extend_vocab')\n",
        "      self._max_art_oovs = tf.placeholder(tf.int32, [], name='max_art_oovs')\n",
        "    if FLAGS.ac_training: # added by yaserkl@vt.edu for the purpose of calculating rouge loss\n",
        "      self._q_estimates = tf.placeholder(tf.float32, [self._hps.batch_size,self._hps.k,self._hps.max_dec_steps, None], name='q_estimates')\n",
        "    if FLAGS.scheduled_sampling:\n",
        "      # sampling_probability: chance of feeding back the model's own sample;\n",
        "      # alpha: temperature used by the soft-argmax / soft-top-k sampling.\n",
        "      self._sampling_probability = tf.placeholder(tf.float32, None, name='sampling_probability')\n",
        "      self._alpha = tf.placeholder(tf.float32, None, name='alpha')\n",
        "\n",
        "    # decoder part\n",
        "    self._dec_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='dec_batch')\n",
        "    self._target_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='target_batch')\n",
        "    self._dec_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, hps.max_dec_steps], name='dec_padding_mask')\n",
        "\n",
        "    # In decode mode the graph runs one step at a time, so the previous step's\n",
        "    # coverage / decoder outputs / encoder attention scores are fed back in.\n",
        "    if hps.mode == \"decode\":\n",
        "      if hps.coverage:\n",
        "        self.prev_coverage = tf.placeholder(tf.float32, [hps.batch_size, None], name='prev_coverage')\n",
        "      if hps.intradecoder:\n",
        "        self.prev_decoder_outputs = tf.placeholder(tf.float32, [None, hps.batch_size, hps.dec_hidden_dim], name='prev_decoder_outputs')\n",
        "      if hps.use_temporal_attention:\n",
        "        self.prev_encoder_es = tf.placeholder(tf.float32, [None, hps.batch_size, None], name='prev_encoder_es')\n",
        "\n",
        "  def _make_feed_dict(self, batch, just_enc=False):\n",
        "    \"\"\"Make a feed dictionary mapping parts of the batch to the appropriate placeholders.\n",
        "    Args:\n",
        "      batch: Batch object\n",
        "      just_enc: Boolean. If True, only feed the parts needed for the encoder.\n",
        "    \"\"\"\n",
        "    # NOTE(review): _eta, _sampling_probability and _alpha are not fed here;\n",
        "    # presumably the training loop adds them to the feed dict -- verify.\n",
        "    feed_dict = {}\n",
        "    feed_dict[self._enc_batch] = batch.enc_batch\n",
        "    feed_dict[self._enc_lens] = batch.enc_lens\n",
        "    feed_dict[self._enc_padding_mask] = batch.enc_padding_mask\n",
        "    if FLAGS.pointer_gen:\n",
        "      feed_dict[self._enc_batch_extend_vocab] = batch.enc_batch_extend_vocab\n",
        "      feed_dict[self._max_art_oovs] = batch.max_art_oovs\n",
        "    if not just_enc:\n",
        "      # Decoder-side inputs are only needed for training / eval, not encoding.\n",
        "      feed_dict[self._dec_batch] = batch.dec_batch\n",
        "      feed_dict[self._target_batch] = batch.target_batch\n",
        "      feed_dict[self._dec_padding_mask] = batch.dec_padding_mask\n",
        "    return feed_dict\n",
        "\n",
        "  def _add_encoder(self, emb_enc_inputs, seq_len):\n",
        "    \"\"\"Add a single-layer bidirectional LSTM encoder to the graph.\n",
        "    Args:\n",
        "      emb_enc_inputs: A tensor of shape [batch_size, <=max_enc_steps, emb_size].\n",
        "      seq_len: Lengths of emb_enc_inputs (before padding). A tensor of shape [batch_size].\n",
        "    Returns:\n",
        "      encoder_outputs:\n",
        "        A tensor of shape [batch_size, <=max_enc_steps, 2*hidden_dim]. It's 2*hidden_dim because it's the concatenation of the forwards and backwards states.\n",
        "      fw_state, bw_state:\n",
        "        Each are LSTMStateTuples of shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n",
        "    \"\"\"\n",
        "    with tf.variable_scope('encoder'):\n",
        "      cell_fw = tf.contrib.rnn.LSTMCell(self._hps.enc_hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n",
        "      cell_bw = tf.contrib.rnn.LSTMCell(self._hps.enc_hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n",
        "      # swap_memory=True allows swapping tensors to host memory to reduce peak GPU use.\n",
        "      (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, emb_enc_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n",
        "      encoder_outputs = tf.concat(axis=2, values=encoder_outputs) # concatenate the forwards and backwards states\n",
        "    return encoder_outputs, fw_st, bw_st\n",
        "\n",
        "  def _reduce_states(self, fw_st, bw_st):\n",
        "    \"\"\"Add to the graph a linear layer to reduce the encoder's final FW and BW state into a single initial state for the decoder. This is needed because the encoder is bidirectional but the decoder is not.\n",
        "    Args:\n",
        "      fw_st: LSTMStateTuple with hidden_dim units.\n",
        "      bw_st: LSTMStateTuple with hidden_dim units.\n",
        "    Returns:\n",
        "      state: LSTMStateTuple with hidden_dim units.\n",
        "    \"\"\"\n",
        "    enc_hidden_dim = self._hps.enc_hidden_dim\n",
        "    dec_hidden_dim = self._hps.dec_hidden_dim\n",
        "\n",
        "    # Projects the concatenated (2 * enc_hidden_dim) fw/bw states down to\n",
        "    # dec_hidden_dim via a ReLU linear layer, separately for cell and hidden.\n",
        "    with tf.variable_scope('reduce_final_st'):\n",
        "\n",
        "      # Define weights and biases to reduce the cell and reduce the state\n",
        "      w_reduce_c = tf.get_variable('w_reduce_c', [enc_hidden_dim * 2, dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n",
        "      w_reduce_h = tf.get_variable('w_reduce_h', [enc_hidden_dim * 2, dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n",
        "      bias_reduce_c = tf.get_variable('bias_reduce_c', [dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n",
        "      bias_reduce_h = tf.get_variable('bias_reduce_h', [dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n",
        "\n",
        "      # Apply linear layer\n",
        "      old_c = tf.concat(axis=1, values=[fw_st.c, bw_st.c]) # Concatenation of fw and bw cell\n",
        "      old_h = tf.concat(axis=1, values=[fw_st.h, bw_st.h]) # Concatenation of fw and bw state\n",
        "      new_c = tf.nn.relu(tf.matmul(old_c, w_reduce_c) + bias_reduce_c) # Get new cell from old cell\n",
        "      new_h = tf.nn.relu(tf.matmul(old_h, w_reduce_h) + bias_reduce_h) # Get new state from old state\n",
        "      return tf.contrib.rnn.LSTMStateTuple(new_c, new_h) # Return new cell and state\n",
        "\n",
        "  def _add_decoder(self, emb_dec_inputs, embedding):\n",
        "    \"\"\"Add attention decoder to the graph. In train or eval mode, you call this once to get output on ALL steps. In decode (beam search) mode, you call this once for EACH decoder step.\n",
        "    Args:\n",
        "      emb_dec_inputs: inputs to the decoder (word embeddings). A list of tensors shape (batch_size, emb_dim)\n",
        "      embedding: embedding matrix (vocab_size, emb_dim)\n",
        "    Returns:\n",
        "      outputs: List of tensors; the outputs of the decoder\n",
        "      out_state: The final state of the decoder\n",
        "      attn_dists: A list of tensors; the attention distributions\n",
        "      p_gens: A list of tensors shape (batch_size, 1); the generation probabilities\n",
        "      coverage: A tensor, the current coverage vector\n",
        "    \"\"\"\n",
        "    hps = self._hps\n",
        "    cell = tf.contrib.rnn.LSTMCell(hps.dec_hidden_dim, state_is_tuple=True, initializer=self.rand_unif_init)\n",
        "\n",
        "    # Empty stacks act as 'no previous state' sentinels outside decode mode.\n",
        "    prev_coverage = self.prev_coverage if (hps.mode==\"decode\" and hps.coverage) else None # In decode mode, we run attention_decoder one step at a time and so need to pass in the previous step's coverage vector each time\n",
        "    prev_decoder_outputs = self.prev_decoder_outputs if (hps.intradecoder and hps.mode==\"decode\") else tf.stack([],axis=0)\n",
        "    prev_encoder_es = self.prev_encoder_es if (hps.use_temporal_attention and hps.mode==\"decode\") else tf.stack([],axis=0)\n",
        "    # NOTE(review): attention_decoder and UNKNOWN_TOKEN are expected to be\n",
        "    # defined by earlier cells (their module imports are commented out above).\n",
        "    return attention_decoder(hps,\n",
        "      self._vocab.size(),\n",
        "      self._max_art_oovs,\n",
        "      self._enc_batch_extend_vocab,\n",
        "      emb_dec_inputs,\n",
        "      self._target_batch,\n",
        "      self._dec_in_state,\n",
        "      self._enc_states,\n",
        "      self._enc_padding_mask,\n",
        "      self._dec_padding_mask,\n",
        "      cell,\n",
        "      embedding,\n",
        "      self._sampling_probability if FLAGS.scheduled_sampling else 0,\n",
        "      self._alpha if FLAGS.E2EBackProp else 0,\n",
        "      self._vocab.word2id(UNKNOWN_TOKEN),\n",
        "      initial_state_attention=(hps.mode==\"decode\"),\n",
        "      pointer_gen=hps.pointer_gen,\n",
        "      use_coverage=hps.coverage,\n",
        "      prev_coverage=prev_coverage,\n",
        "      prev_decoder_outputs=prev_decoder_outputs,\n",
        "      prev_encoder_es = prev_encoder_es)\n",
        "\n",
        "  def _add_emb_vis(self, embedding_var):\n",
        "    \"\"\"Do setup so that we can view word embedding visualization in Tensorboard, as described here:\n",
        "    https://www.tensorflow.org/get_started/embedding_viz\n",
        "    Make the vocab metadata file, then make the projector config file pointing to it.\"\"\"\n",
        "    # NOTE(review): `projector` comes from tensorflow.contrib.tensorboard.plugins,\n",
        "    # whose import is commented out above; the only call site (in _add_seq2seq)\n",
        "    # is also commented out. Re-enable both together if embedding viz is wanted.\n",
        "    train_dir = os.path.join(FLAGS.log_root, \"train\")\n",
        "    vocab_metadata_path = os.path.join(train_dir, \"vocab_metadata.tsv\")\n",
        "    self._vocab.write_metadata(vocab_metadata_path) # write metadata file\n",
        "    summary_writer = tf.summary.FileWriter(train_dir)\n",
        "    config = projector.ProjectorConfig()\n",
        "    embedding = config.embeddings.add()\n",
        "    embedding.tensor_name = embedding_var.name\n",
        "    embedding.metadata_path = vocab_metadata_path\n",
        "    projector.visualize_embeddings(summary_writer, config)\n",
        "\n",
        "  def discount_rewards(self, r):\n",
        "    \"\"\" take a list of size max_dec_step * (batch_size, k) and return a list of the same size \"\"\"\n",
        "    discounted_r = []\n",
        "    running_add = tf.constant(0, tf.float32)\n",
        "    # Accumulate from the last timestep backwards, then restore time order below.\n",
        "    for t in reversed(range(0, len(r))):\n",
        "      running_add = running_add * self._hps.gamma + r[t] # rd_t = r_t + gamma * r_{t+1}\n",
        "      discounted_r.append(running_add)\n",
        "    discounted_r = tf.stack(discounted_r[::-1]) # (max_dec_step, batch_size, k)\n",
        "    # L2-normalize along the time axis to keep reward magnitudes comparable.\n",
        "    normalized_discounted_r = tf.nn.l2_normalize(discounted_r, axis=0)\n",
        "    return tf.unstack(normalized_discounted_r) # list of max_dec_step * (batch_size, k)\n",
        "\n",
        "  def intermediate_rewards(self, r):\n",
        "    \"\"\" take a list of size max_dec_step * (batch_size, k) and return a list of the same size\n",
        "        uses the intermediate reward as proposed by: R_t = r_t - r_{t-1} \"\"\"\n",
        "    intermediate_r = []\n",
        "    # First step keeps its raw reward; later steps get the delta r_t - r_{t-1}.\n",
        "    intermediate_r.append(r[0])\n",
        "    for t in range(1, len(r)):\n",
        "      intermediate_r.append(r[t]-r[t-1])\n",
        "    return intermediate_r # list of max_dec_step * (batch_size, k)\n",
        "\n",
        "  def _add_seq2seq(self):\n",
        "    \"\"\"Add the whole sequence-to-sequence model to the graph.\"\"\"\n",
        "    hps = self._hps\n",
        "    vsize = self._vocab.size() # size of the vocabulary\n",
        "\n",
        "    with tf.variable_scope('seq2seq'):\n",
        "      # Some initializers\n",
        "      self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n",
        "      self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n",
        "\n",
        "      # Add embedding matrix (shared by the encoder and decoder inputs)\n",
        "      with tf.variable_scope('embedding'):\n",
        "        if FLAGS.embedding != \"\":\n",
        "          # Pretrained embeddings are loaded through the placeholder set up in _add_placeholders.\n",
        "          embedding = tf.Variable(self.embedding_place)\n",
        "        else:\n",
        "          embedding = tf.get_variable('embedding', [vsize, hps.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n",
        "        ##if hps.mode==\"train\": self._add_emb_vis(embedding) # add to tensorboard\n",
        "        emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)\n",
        "        emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)\n",
        "\n",
        "      # Add the encoder.\n",
        "      enc_outputs, fw_st, bw_st = self._add_encoder(emb_enc_inputs, self._enc_lens)\n",
        "      self._enc_states = enc_outputs\n",
        "\n",
        "      # Our encoder is bidirectional and our decoder is unidirectional so we need to reduce the final encoder hidden state to the right size to be the initial decoder hidden state\n",
        "      self._dec_in_state = self._reduce_states(fw_st, bw_st)\n",
        "\n",
        "      # Add the decoder.\n",
        "      with tf.variable_scope('decoder'):\n",
        "        (self.decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.vocab_scores,\n",
        "         self.final_dists, self.samples, self.greedy_search_samples, self.temporal_es,\n",
        "         self.sampling_rewards, self.greedy_rewards) = self._add_decoder(emb_dec_inputs, embedding)\n",
        "\n",
        "      # RL/AC variants precompute per-step rewards or sampled sentences for the loss ops.\n",
        "      if FLAGS.use_discounted_rewards and hps.rl_training and hps.mode in ['train', 'eval']:\n",
        "        # Get the sampled and greedy sentence from model output\n",
        "        # self.samples: (max_dec_steps, batch_size, k)\n",
        "        self.sampling_discounted_rewards = tf.stack(self.discount_rewards(tf.unstack(self.sampling_rewards))) # list of max_dec_steps * (batch_size, k)\n",
        "        self.greedy_discounted_rewards = tf.stack(self.discount_rewards(tf.unstack(self.greedy_rewards))) # list of max_dec_steps * (batch_size, k)\n",
        "      elif FLAGS.use_intermediate_rewards and hps.rl_training and hps.mode in ['train', 'eval']:\n",
        "        # Get the sampled and greedy sentence from model output\n",
        "        # self.samples: (max_dec_steps, batch_size, k)\n",
        "        self.sampling_discounted_rewards = tf.stack(self.intermediate_rewards(tf.unstack(self.sampling_rewards))) # list of max_dec_steps * (batch_size, k)\n",
        "        self.greedy_discounted_rewards = tf.stack(self.intermediate_rewards(tf.unstack(self.greedy_rewards))) # list of max_dec_steps * (batch_size, k)\n",
        "      elif hps.ac_training and hps.mode in ['train', 'eval']:\n",
        "        # Get the sampled and greedy sentence from model output\n",
        "        self.sampled_sentences = tf.transpose(tf.stack(self.samples), perm=[1,2,0]) # (batch_size, k, <=max_dec_steps) word indices\n",
        "        self.greedy_search_sentences = tf.transpose(tf.stack(self.greedy_search_samples), perm=[1,2,0]) # (batch_size, k, <=max_dec_steps) word indices\n",
        "\n",
        "    if hps.mode == \"decode\":\n",
        "      # We run decode beam search mode one decoder step at a time\n",
        "      assert len(self.final_dists)==1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n",
        "      self.final_dists = self.final_dists[0]\n",
        "      topk_probs, self._topk_ids = tf.nn.top_k(self.final_dists, hps.batch_size*2) # take the k largest probs. note batch_size=beam_size in decode mode\n",
        "      self._topk_log_probs = tf.log(topk_probs)\n",
        "\n",
        "  def _add_shared_loss_op(self):\n",
        "    \"\"\"Add the shared (CE + RL/AC) loss ops to the graph.\n",
        "    Sets self._pgen_loss and, depending on flags, self._rl_loss,\n",
        "    self._reinforce_shared_loss and the coverage-augmented totals.\n",
        "    \"\"\"\n",
        "    # NOTE(review): _mask_and_avg and _coverage_loss are helpers expected to be\n",
        "    # defined in an earlier cell of this notebook.\n",
        "    # Calculate the loss\n",
        "    with tf.variable_scope('shared_loss'):\n",
        "      # Calculate the loss per step\n",
        "      # This is fiddly; we use tf.gather_nd to pick out the probabilities of the gold target words\n",
        "      #### added by yaserkl@vt.edu: we just calculate these to monitor pgen_loss throughout time\n",
        "      loss_per_step = [] # will be list length max_dec_steps containing shape (batch_size)\n",
        "      batch_nums = tf.range(0, limit=self._hps.batch_size) # shape (batch_size)\n",
        "      for dec_step, dist in enumerate(self.final_dists):\n",
        "        targets = self._target_batch[:,dec_step] # The indices of the target words. shape (batch_size)\n",
        "        indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n",
        "        gold_probs = tf.gather_nd(dist, indices) # shape (batch_size). prob of correct words on this step\n",
        "        losses = -tf.log(gold_probs)\n",
        "        loss_per_step.append(losses)\n",
        "      self._pgen_loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n",
        "      self.variable_summaries('pgen_loss', self._pgen_loss)\n",
        "      # Adding Q-Estimation to CE loss in Actor-Critic Model\n",
        "      if self._hps.ac_training:\n",
        "        # Calculating Actor-Critic loss\n",
        "        # Here, we multiple the Q-estimation for each token to its respective probability\n",
        "        loss_per_step = [] # will be list length k each containing a list of shape <=max_dec_steps which each has the shape (batch_size)\n",
        "        q_loss_per_step = [] # will be list length k each containing a list of shape <=max_dec_steps which each has the shape (batch_size)\n",
        "        batch_nums = tf.range(0, limit=self._hps.batch_size) # shape (batch_size)\n",
        "        unstacked_q = tf.unstack(self._q_estimates, axis =1) # list of k with size (batch_size, <=max_dec_steps, vsize_extended)\n",
        "        for sample_id in range(self._hps.k):\n",
        "          loss_per_sample = [] # length <=max_dec_steps of batch_sizes\n",
        "          q_loss_per_sample = [] # length <=max_dec_steps of batch_sizes\n",
        "          q_val_per_sample = tf.unstack(unstacked_q[sample_id], axis =1) # list of <=max_dec_step (batch_size, vsize_extended)\n",
        "          for dec_step, (dist, q_value) in enumerate(zip(self.final_dists, q_val_per_sample)):\n",
        "            targets = tf.squeeze(self.samples[dec_step][:,sample_id]) # The indices of the sampled words. shape (batch_size)\n",
        "            indices = tf.stack((batch_nums, targets), axis=1) # shape (batch_size, 2)\n",
        "            gold_probs = tf.gather_nd(dist, indices) # shape (batch_size). prob of correct words on this step\n",
        "            losses = -tf.log(gold_probs)\n",
        "            # Weight the full log-distribution by the critic's Q-estimates, then\n",
        "            # pick out the entries for the sampled tokens.\n",
        "            dist_q_val = -tf.log(dist) * q_value\n",
        "            q_losses = tf.gather_nd(dist_q_val, indices) # shape (batch_size). prob of correct words on this step\n",
        "            loss_per_sample.append(losses)\n",
        "            q_loss_per_sample.append(q_losses)\n",
        "          loss_per_step.append(loss_per_sample)\n",
        "          q_loss_per_step.append(q_loss_per_sample)\n",
        "        with tf.variable_scope('reinforce_loss'):\n",
        "          #### this is the actual loss\n",
        "          self._rl_avg_logprobs = tf.reduce_mean([_mask_and_avg(loss_per_sample, self._dec_padding_mask) for loss_per_sample in loss_per_step])\n",
        "          self._rl_loss = tf.reduce_mean([_mask_and_avg(q_loss_per_sample, self._dec_padding_mask) for q_loss_per_sample in q_loss_per_step])\n",
        "          # Eq. 34 in https://arxiv.org/pdf/1805.09461.pdf\n",
        "          self._reinforce_shared_loss = self._eta * self._rl_loss + (tf.constant(1.,dtype=tf.float32) - self._eta) * self._pgen_loss # equation 16 in https://arxiv.org/pdf/1705.04304.pdf\n",
        "          #### the following is only for monitoring purposes\n",
        "          self.variable_summaries('reinforce_avg_logprobs', self._rl_avg_logprobs)\n",
        "          self.variable_summaries('reinforce_loss', self._rl_loss)\n",
        "          self.variable_summaries('reinforce_shared_loss', self._reinforce_shared_loss)\n",
        "\n",
        "      # Adding Self-Critic Reward to CE loss in Policy-Gradient Model\n",
        "      if self._hps.rl_training:\n",
        "        #### Calculating the reinforce loss according to Eq. 15 in https://arxiv.org/pdf/1705.04304.pdf\n",
        "        loss_per_step = [] # will be list length max_dec_steps*k containing shape (batch_size)\n",
        "        rl_loss_per_step = [] # will be list length max_dec_steps*k containing shape (batch_size)\n",
        "        batch_nums = tf.range(0, limit=self._hps.batch_size) # shape (batch_size)\n",
        "        self._sampled_rouges = []\n",
        "        self._greedy_rouges = []\n",
        "        self._reward_diff = []\n",
        "        # Build per-sample (k) reward signals; self-critic subtracts the greedy\n",
        "        # baseline from the sampled reward.\n",
        "        for _ in range(self._hps.k):\n",
        "          if FLAGS.use_discounted_rewards or FLAGS.use_intermediate_rewards:\n",
        "            self._sampled_rouges.append(self.sampling_discounted_rewards[:, :, _]) # shape (max_enc_steps, batch_size)\n",
        "            self._greedy_rouges.append(self.greedy_discounted_rewards[:, :, _]) # shape (max_enc_steps, batch_size)\n",
        "          else:\n",
        "            # use the reward of last step, since we use the reward of the whole sentence in this case\n",
        "            self._sampled_rouges.append(self.sampling_rewards[:, _]) # shape (batch_size)\n",
        "            self._greedy_rouges.append(self.greedy_rewards[:, _]) # shape (batch_size)\n",
        "          if FLAGS.self_critic:\n",
        "            self._reward_diff.append(self._greedy_rouges[_]-self._sampled_rouges[_])\n",
        "          else:\n",
        "            self._reward_diff.append(self._sampled_rouges[_])\n",
        "        for dec_step, dist in enumerate(self.final_dists):\n",
        "          _targets = self.samples[dec_step] # The indices of the sampled words. shape (batch_size, k)\n",
        "          for _k, targets in enumerate(tf.unstack(_targets,axis=1)): # list of k samples of size (batch_size)\n",
        "            indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n",
        "            gold_probs = tf.gather_nd(dist, indices) # shape (batch_size). prob of correct words on this step\n",
        "            losses = -tf.log(gold_probs)\n",
        "            loss_per_step.append(losses)\n",
        "            # Equation 15 in https://arxiv.org/pdf/1705.04304.pdf\n",
        "            # Equal reward for all tokens\n",
        "            if FLAGS.use_discounted_rewards or FLAGS.use_intermediate_rewards:\n",
        "              rl_losses = -tf.log(gold_probs) * self._reward_diff[_k][dec_step, :]  # positive values\n",
        "            else:\n",
        "              rl_losses = -tf.log(gold_probs) * self._reward_diff[_k] # positive values\n",
        "            rl_loss_per_step.append(rl_losses)\n",
        "\n",
        "        # new size: (k, max_dec_steps, batch_size)\n",
        "        rl_loss_per_step = tf.unstack(\n",
        "          tf.transpose(tf.reshape(rl_loss_per_step, [-1, self._hps.k, self._hps.batch_size]),perm=[1,0,2]))\n",
        "        loss_per_step = tf.unstack(\n",
        "          tf.transpose(tf.reshape(loss_per_step, [-1, self._hps.k, self._hps.batch_size]), perm=[1, 0, 2]))\n",
        "\n",
        "        if FLAGS.use_intermediate_rewards:\n",
        "          # Intermediate deltas sum back to the whole-sequence reward per sample.\n",
        "          self._sampled_rouges = tf.reduce_sum(self._sampled_rouges, axis=1) # shape (k, batch_size)\n",
        "          self._greedy_rouges = tf.reduce_sum(self._greedy_rouges, axis=1) # shape (k, batch_size)\n",
        "          self._reward_diff = tf.reduce_sum(self._reward_diff, axis=1) # shape (k, batch_size)\n",
        "\n",
        "        with tf.variable_scope('reinforce_loss'):\n",
        "          self._rl_avg_logprobs = []\n",
        "          self._rl_loss = []\n",
        "\n",
        "          for _k in range(self._hps.k):\n",
        "            self._rl_avg_logprobs.append(_mask_and_avg(tf.unstack(loss_per_step[_k]), self._dec_padding_mask))\n",
        "            self._rl_loss.append(_mask_and_avg(tf.unstack(tf.reshape(rl_loss_per_step[_k], [self._hps.max_dec_steps, self._hps.batch_size])), self._dec_padding_mask))\n",
        "\n",
        "          self._rl_avg_logprobs = tf.reduce_mean(self._rl_avg_logprobs)\n",
        "          self._rl_loss = tf.reduce_mean(self._rl_loss)\n",
        "          # We multiply the ROUGE difference of sampling vs greedy sentence to the loss of all tokens in the sequence\n",
        "          # Eq. 16 in https://arxiv.org/pdf/1705.04304.pdf and Eq. 34 in https://arxiv.org/pdf/1805.09461.pdf\n",
        "          self._reinforce_shared_loss = self._eta * self._rl_loss + (tf.constant(1.,dtype=tf.float32) - self._eta) * self._pgen_loss\n",
        "          #### the following is only for monitoring purposes\n",
        "          self.variable_summaries('reinforce_avg_logprobs', self._rl_avg_logprobs)\n",
        "          self.variable_summaries('reinforce_loss', self._rl_loss)\n",
        "          self.variable_summaries('reinforce_sampled_r_value', tf.reduce_mean(self._sampled_rouges))\n",
        "          self.variable_summaries('reinforce_greedy_r_value', tf.reduce_mean(self._greedy_rouges))\n",
        "          self.variable_summaries('reinforce_r_diff', tf.reduce_mean(self._reward_diff))\n",
        "          self.variable_summaries('reinforce_shared_loss', self._reinforce_shared_loss)\n",
        "\n",
        "      # Calculate coverage loss from the attention distributions\n",
        "      if self._hps.coverage:\n",
        "        with tf.variable_scope('coverage_loss'):\n",
        "          self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n",
        "          self.variable_summaries('coverage_loss', self._coverage_loss)\n",
        "        if self._hps.rl_training or self._hps.ac_training:\n",
        "          with tf.variable_scope('reinforce_loss'):\n",
        "            self._reinforce_cov_total_loss = self._reinforce_shared_loss + self._hps.cov_loss_wt * self._coverage_loss\n",
        "            self.variable_summaries('reinforce_coverage_loss', self._reinforce_cov_total_loss)\n",
        "        if self._hps.pointer_gen:\n",
        "          self._pointer_cov_total_loss = self._pgen_loss + self._hps.cov_loss_wt * self._coverage_loss\n",
        "          self.variable_summaries('pointer_coverage_loss', self._pointer_cov_total_loss)\n",
        "\n",
        "  def _add_shared_train_op(self):\n",
        "    \"\"\"Sets self._shared_train_op, the op to run for training.\n",
        "\n",
        "    Picks the loss according to the training mode (RL/AC vs. plain\n",
        "    pointer-generator, optionally with coverage), clips gradients by\n",
        "    global norm, and applies them with Adagrad.\n",
        "    \"\"\"\n",
        "    # Take gradients of the trainable variables w.r.t. the loss function to minimize.\n",
        "    # RL/AC training minimizes the mixed reinforce+MLE loss; otherwise the\n",
        "    # pointer-generator loss. Coverage adds a weighted coverage penalty.\n",
        "    if self._hps.rl_training or self._hps.ac_training:\n",
        "      loss_to_minimize = self._reinforce_shared_loss\n",
        "      if self._hps.coverage:\n",
        "        loss_to_minimize = self._reinforce_cov_total_loss\n",
        "    else:\n",
        "      loss_to_minimize = self._pgen_loss\n",
        "      if self._hps.coverage:\n",
        "        loss_to_minimize = self._pointer_cov_total_loss\n",
        "\n",
        "    tvars = tf.trainable_variables()\n",
        "    # EXPERIMENTAL_TREE aggregation reduces peak memory when summing gradients.\n",
        "    gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)\n",
        "\n",
        "    # Clip the gradients\n",
        "    with tf.device(\"/gpu:{}\".format(self._hps.gpu_num)):\n",
        "      grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)\n",
        "\n",
        "    # Add a summary so the gradient norm can be monitored in TensorBoard\n",
        "    tf.summary.scalar('global_norm', global_norm)\n",
        "\n",
        "    # Apply adagrad optimizer\n",
        "    optimizer = tf.train.AdagradOptimizer(self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)\n",
        "    with tf.device(\"/gpu:{}\".format(self._hps.gpu_num)):\n",
        "      self._shared_train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')\n",
        "\n",
        "  def build_graph(self):\n",
        "    \"\"\"Add the placeholders, model, global step, train_op and summaries to the graph.\n",
        "\n",
        "    Loss ops are only added in train/eval modes, and the train op only in\n",
        "    train mode (decode mode builds just the forward graph).\n",
        "    \"\"\"\n",
        "    tf.logging.info('Building graph...')\n",
        "    t0 = time.time()\n",
        "    self.global_step = tf.Variable(0, name='global_step', trainable=False)\n",
        "    self._add_placeholders()\n",
        "    # Pin the whole model to the configured GPU.\n",
        "    with tf.device(\"/gpu:{}\".format(self._hps.gpu_num)):\n",
        "      self._add_seq2seq()\n",
        "      if self._hps.mode in ['train', 'eval']:\n",
        "        self._add_shared_loss_op()\n",
        "      if self._hps.mode == 'train':\n",
        "        self._add_shared_train_op()\n",
        "      self._summaries = tf.summary.merge_all()\n",
        "    t1 = time.time()\n",
        "    tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n",
        "\n",
        "  def collect_dqn_transitions(self, sess, batch, step, max_art_oovs):\n",
        "    \"\"\"Get decoders' output and calculate reward at each decoding step, Q-function, value-function, and Advantage function.\n",
        "    Args:\n",
        "      sess: seq2seq model session\n",
        "      batch: current batch\n",
        "      step: training step\n",
        "      max_art_oovs: number of OOV tokens in current batch.\n",
        "    Returns:\n",
        "      transitions:\n",
        "        Experiences collected from decoders' outputs. (batch_size, k, max_dec_steps)\n",
        "    \"\"\"\n",
        "\n",
        "    feed_dict = self._make_feed_dict(batch)\n",
        "    # eta mixes the RL loss with the MLE loss; either fixed or linearly annealed with step.\n",
        "    if self._hps.fixed_eta:\n",
        "      feed_dict[self._eta] = self._hps.eta\n",
        "    else:\n",
        "      feed_dict[self._eta] = min(step * self._hps.eta,1.)\n",
        "    if self._hps.scheduled_sampling:\n",
        "      if self._hps.fixed_sampling_probability:\n",
        "        feed_dict[self._sampling_probability] = self._hps.sampling_probability\n",
        "      else:\n",
        "        feed_dict[self._sampling_probability] = min(step * self._hps.sampling_probability,1.) # linear decay function\n",
        "      # Cap exp(step*alpha) at float64 max so the exponential cannot overflow before the log.\n",
        "      ranges = [np.exp(float(step) * self._hps.alpha),np.finfo(np.float64).max] # to avoid overflow\n",
        "      feed_dict[self._alpha] = np.log(ranges[np.argmin(ranges)]) # linear decay function\n",
        "\n",
        "    # Extended vocabulary = base vocab plus this batch's article OOVs (pointer-gen).\n",
        "    vsize_extended = self._vocab.size() + max_art_oovs\n",
        "    if self._hps.calculate_true_q:\n",
        "      # True Q-estimation: keep full per-action R/Q/A tables plus per-state values V.\n",
        "      self.advantages = np.zeros((self._hps.batch_size, self._hps.k, self._hps.max_dec_steps, vsize_extended),dtype=np.float32) # (batch_size, k, <=max_dec_steps,vocab_size)\n",
        "      self.q_values = np.zeros((self._hps.batch_size, self._hps.k, self._hps.max_dec_steps, vsize_extended),dtype=np.float32) # (batch_size, k, <=max_dec_steps,vocab_size)\n",
        "      self.r_values = np.zeros((self._hps.batch_size, self._hps.k, self._hps.max_dec_steps, vsize_extended),dtype=np.float32) # (batch_size, k, <=max_dec_steps,vocab_size)\n",
        "      self.v_values = np.zeros((self._hps.batch_size, self._hps.k, self._hps.max_dec_steps),dtype=np.float32) # (batch_size, k, <=max_dec_steps)\n",
        "    else:\n",
        "      self.r_values = np.zeros((self._hps.batch_size, self._hps.k, self._hps.max_dec_steps),dtype=np.float32) # (batch_size, k, <=max_dec_steps)\n",
        "    to_return = {\n",
        "      'sampled_sentences': self.sampled_sentences,\n",
        "      'greedy_search_sentences': self.greedy_search_sentences,\n",
        "      'decoder_outputs': self.decoder_outputs,\n",
        "    }\n",
        "    # Run the seq2seq model to get the decoders' output.\n",
        "    ret_dict = sess.run(to_return, feed_dict)\n",
        "\n",
        "\n",
        "    # Calculating reward, Q, V, and A values\n",
        "    _t = time.time()\n",
        "    ### TODO: do it in parallel???\n",
        "    for _n, (sampled_sentence, greedy_search_sentence, target_sentence) in enumerate(zip(ret_dict['sampled_sentences'],ret_dict['greedy_search_sentences'], batch.target_batch)): # run batch_size time\n",
        "      _gts = target_sentence\n",
        "      for i in range(self._hps.k):\n",
        "        _ss = greedy_search_sentence[i] # reward is calculated over the best action through greedy search\n",
        "        if self._hps.calculate_true_q:\n",
        "          # Collect Reward, Q, V, and A only when we want to train DDQN using true Q-estimation\n",
        "          A, Q, V, R = self.caluclate_advantage_function(_ss, _gts, vsize_extended)\n",
        "          self.r_values[_n,i,:,:] = R\n",
        "          self.advantages[_n,i,:,:] = A\n",
        "          self.q_values[_n,i,:,:] = Q\n",
        "          self.v_values[_n,i,:] = V\n",
        "        else:\n",
        "          # if using DDQN estimates, we only need to calculate the reward and later on estimate Q values.\n",
        "          self.r_values[_n, i, :] = self.caluclate_single_reward(_ss, _gts) # len max_dec_steps\n",
        "    tf.logging.info('seconds for dqn collection: {}'.format(time.time()-_t))\n",
        "    trasitions = self.prepare_dqn_transitions(self._hps, ret_dict['decoder_outputs'], ret_dict['greedy_search_sentences'], vsize_extended)\n",
        "    return trasitions\n",
        "\n",
        "  def caluclate_advantage_function(self, _ss, _gts, vsize_extended):\n",
        "    \"\"\"Collect R, Q, V, and A for the given sequence of ground-truth and generated summary\n",
        "    Args:\n",
        "      _ss: A list of generated tokens (max_dec_steps)\n",
        "      _gts: A list of ground-truth tokens (max_dec_steps)\n",
        "      vsize_extended: size of the extended vocab, vocab_size + max_art_oovs\n",
        "    Returns:\n",
        "      A: Advantage values (max_dec_steps, vsize_extended)\n",
        "      Q: Q-values (max_dec_steps, vsize_extended)\n",
        "      V: Value function (max_dec_steps,)\n",
        "      R: Reward values (max_dec_steps, vsize_extended)\n",
        "    \"\"\"\n",
        "\n",
        "    R = np.zeros((self._hps.max_dec_steps, vsize_extended)) # shape (max_dec_steps, vocab_size)\n",
        "    Q = np.zeros((self._hps.max_dec_steps, vsize_extended)) # shape (max_dec_steps, vocab_size)\n",
        "    # Work backwards from the last decoding step so Q[t] is already known\n",
        "    # when Q[t-1] is computed.\n",
        "    for t in range(self._hps.max_dec_steps,0,-1):\n",
        "      R[t-1][:] = self.reward(t, _ss, _gts, vsize_extended)\n",
        "      # We find true Q-values.\n",
        "      # Eq. 30 in https://arxiv.org/pdf/1805.09461.pdf\n",
        "      # BUG FIX: the original indexed Q[t] out of bounds at the last step and\n",
        "      # relied on a bare except to fall back, which also masked unrelated errors.\n",
        "      if t < self._hps.max_dec_steps:\n",
        "        Q[t-1][:] = R[t-1][:] + self._hps.gamma * Q[t,:].max()\n",
        "      else:\n",
        "        # Last step has no successor, so its Q-value is the immediate reward.\n",
        "        Q[t-1][:] = R[t-1][:]\n",
        "\n",
        "    # V is the mean of Q over actions; reshaped to a column so A = Q - V broadcasts.\n",
        "    V = np.reshape(np.mean(Q,axis=1),[-1,1])\n",
        "\n",
        "    A = Q - V\n",
        "    return A, Q, np.squeeze(V), R\n",
        "\n",
        "  def caluclate_single_reward(self, _ss, _gts):\n",
        "    \"\"\"Calculate the reward based on the reference and summary\n",
        "    Args:\n",
        "      _ss: List of model-generated tokens of size max_dec_steps\n",
        "      _gts: List of ground-truth tokens of size max_dec_steps\n",
        "    Returns:\n",
        "      reward:\n",
        "        List of the collected reward for each decoding step (len max_dec_steps).\n",
        "    \"\"\"\n",
        "\n",
        "    # BUG FIX: calc_reward's signature is (self, _ss, _gts); the previous call\n",
        "    # self.calc_reward(t, _ss, _gts) passed the step index as the summary and\n",
        "    # raised a TypeError. The partial reward at step t is computed over the\n",
        "    # first t generated tokens with every later position replaced by the\n",
        "    # [UNK] id (0 in our vocabulary), as described in calc_reward's docstring.\n",
        "    return [self.calc_reward(list(_ss[:t]) + [0] * (self._hps.max_dec_steps - t), _gts)\n",
        "            for t in range(1, self._hps.max_dec_steps + 1)]\n",
        "\n",
        "  def prepare_dqn_transitions(self, hps, decoder_states, greedy_samples, vsize_extended):\n",
        "    \"\"\"Prepare the experiences for this batch\n",
        "    Args:\n",
        "      hps: model paramters\n",
        "      decoder_states: decode output states (max_dec_steps, batch_size, hidden_dim)\n",
        "      greedy_samples: set of tokens selected through greedy selection, list of size batch_size each contains\n",
        "      max_dec_steps tokens.\n",
        "      vsize_extended: size of the extended vocab, vocab_size + max_art_oovs\n",
        "    Returns:\n",
        "      transitions:\n",
        "        List of experiences collected for this batch (batch_size, k, max_dec_steps)\n",
        "    \"\"\"\n",
        "    # all variables must have the shape (batch_size, k, <=max_dec_steps, feature_len)\n",
        "    decoder_states = np.transpose(np.stack(decoder_states),[1,0,2]) # now of shape (batch_size, <=max_dec_steps, hidden_dim)\n",
        "    greedy_samples = np.stack(greedy_samples) # now of shape (batch_size, <=max_dec_steps)\n",
        "\n",
        "    dec_length = decoder_states.shape[1]\n",
        "    hidden_dim = decoder_states.shape[-1]\n",
        "\n",
        "    # modifying decoder state tensor to shape (batch_size, k, <=max_dec_steps, hidden_dim)\n",
        "    # (the same decoder states are replicated across the k samples)\n",
        "    _decoder_states = np.expand_dims(decoder_states, 1)\n",
        "    _decoder_states = np.concatenate([_decoder_states] * hps.k, axis=1) # shape (batch_size, k, <=max_dec_steps, hidden_dim)\n",
        "    # TODO: if wanna use time as a categorical feature\n",
        "    #features = np.concatenate([self.times, _decoder_states], axis=-1) # shape (batch_size, k, <=max_dec_steps, hidden_dim + <=max_dec_steps)\n",
        "    features = _decoder_states # shape (batch_size, k, <=max_dec_steps, hidden_dim)\n",
        "\n",
        "    # Accessors for the reward/Q tables populated by collect_dqn_transitions;\n",
        "    # which pair is used depends on whether true Q-values were calculated.\n",
        "    q_func = lambda i,k,t: self.q_values[i,k,t] # (vsize_extended)\n",
        "    zero_func = lambda i, k, t: np.zeros((vsize_extended))\n",
        "    raction_func = lambda i,k,t,action: self.r_values[i,k,t,action]\n",
        "    r_func = lambda i,k,t,action: self.r_values[i,k,t]\n",
        "\n",
        "    if self._hps.calculate_true_q:\n",
        "      # We use the true q_values that we calculated to train DQN network.\n",
        "      pass_q_func = q_func\n",
        "      pass_r_func = raction_func\n",
        "    else:\n",
        "      # We update the q_values later, after collecting the q_estimates from DQN network.\n",
        "      pass_q_func = zero_func\n",
        "      pass_r_func = r_func\n",
        "\n",
        "    transitions = [] # (h_t, w_t, h_{t+1}, r_t, q_t, done)\n",
        "    for i in range(self._hps.batch_size):\n",
        "      for k in range(self._hps.k):\n",
        "        for t in range(self._hps.max_dec_steps):\n",
        "          action = greedy_samples[i,k,t]\n",
        "          done = (t==(self._hps.max_dec_steps-1) or action==3) # 3 is the id for [STOP] in our vocabulary to stop decoding\n",
        "          state = features[i, k, t]\n",
        "          if done:\n",
        "            # Terminal transition: next state is a zero vector, next action is [STOP].\n",
        "            state_prime = np.zeros((features.shape[-1]))\n",
        "            action_prime = 3 # 3 is the id for [STOP] in our vocabulary to stop decoding\n",
        "          else:\n",
        "            state_prime = features[i,k,t+1]\n",
        "            action_prime = greedy_samples[i,k,t+1]\n",
        "          transitions.append(Transition(state, action, state_prime, action_prime, pass_r_func(i,k,t,action), pass_q_func(i,k,t), done))\n",
        "\n",
        "    return transitions\n",
        "\n",
        "  def calc_reward(self, _ss, _gts): # optimizing based on ROUGE-L\n",
        "    \"\"\"Score a generated summary against the reference with self.reward_function.\n",
        "    Token ids are joined into whitespace-separated strings before scoring, so\n",
        "    any partial/prefix masking (e.g. replacing tokens after step t with the\n",
        "    [UNK] id) must be done by the caller -- see reward() and\n",
        "    caluclate_single_reward().\n",
        "    Args:\n",
        "      _ss: List of model-generated token ids\n",
        "      _gts: List of ground-truth token ids\n",
        "    Returns:\n",
        "      reward: The calculated reward \n",
        "    \"\"\"\n",
        "\n",
        "    summary = ' '.join([str(k) for k in _ss])\n",
        "    reference = ' '.join([str(k) for k in _gts])\n",
        "    reward = self.reward_function(reference, summary, self._hps.reward_function)\n",
        "    return reward\n",
        "\n",
        "  def reward(self, t, _ss, _gts, vsize_extended): # shape (vocab_size)\n",
        "    \"\"\"Per-action reward vector for decoding step t.\n",
        "    Every vocabulary entry gets the reward of the generated prefix followed by\n",
        "    [UNK] (id 0 in our vocabulary); the ground-truth token at step t instead\n",
        "    gets the reward of the prefix followed by that gold token.\n",
        "    \"\"\"\n",
        "    prefix = _ss[0:t]\n",
        "    # Baseline case: prefix extended with our special character '[UNK]' (id 0).\n",
        "    unk_reward = self.calc_reward(np.append(prefix, [0]), _gts)\n",
        "    rewards = [unk_reward] * vsize_extended\n",
        "    # The ground-truth token is scored with itself appended instead of [UNK].\n",
        "    gold_token = _gts[t-1]\n",
        "    rewards[gold_token] = self.calc_reward(np.append(prefix, [gold_token]), _gts)\n",
        "\n",
        "    return rewards\n",
        "\n",
        "  def run_train_steps(self, sess, batch, step, q_estimates=None):\n",
        "    \"\"\" Run train steps\n",
        "    Args:\n",
        "      sess: seq2seq session\n",
        "      batch: current batch\n",
        "      step: training step\n",
        "      q_estimates = if using Actor-Critic model, this variable will feed\n",
        "      the Q-estimates collected from Critic and use it to update the model\n",
        "      loss\n",
        "    Returns:\n",
        "      Dict of fetched values from sess.run (train op, summaries, losses, ...).\n",
        "    \"\"\"\n",
        "    feed_dict = self._make_feed_dict(batch)\n",
        "    # eta mixes the RL loss with the MLE loss; fixed or linearly annealed with step.\n",
        "    if self._hps.ac_training or self._hps.rl_training:\n",
        "      if self._hps.fixed_eta:\n",
        "        feed_dict[self._eta] = self._hps.eta\n",
        "      else:\n",
        "        feed_dict[self._eta] = min(step * self._hps.eta, 1.)\n",
        "    if self._hps.scheduled_sampling:\n",
        "      if self._hps.fixed_sampling_probability:\n",
        "        feed_dict[self._sampling_probability] = self._hps.sampling_probability\n",
        "      else:\n",
        "        feed_dict[self._sampling_probability] = min(step * self._hps.sampling_probability, 1.) # linear decay function\n",
        "      # Cap exp(step*alpha) at float64 max so the exponential cannot overflow.\n",
        "      ranges = [np.exp(float(step) * self._hps.alpha), np.finfo(np.float64).max] # to avoid overflow\n",
        "      feed_dict[self._alpha] = np.log(ranges[np.argmin(ranges)]) # linear decay function\n",
        "    if self._hps.ac_training:\n",
        "      # Q-estimates come from the Critic (DDQN) and enter the actor's loss.\n",
        "      self.q_estimates = q_estimates\n",
        "      feed_dict[self._q_estimates]= self.q_estimates\n",
        "    to_return = {\n",
        "        'train_op': self._shared_train_op,\n",
        "        'summaries': self._summaries,\n",
        "        'pgen_loss': self._pgen_loss,\n",
        "        'global_step': self.global_step,\n",
        "        'decoder_outputs': self.decoder_outputs\n",
        "    }\n",
        "\n",
        "    if self._hps.rl_training:\n",
        "      to_return['sampled_sentence_r_values'] = self._sampled_rouges\n",
        "      to_return['greedy_sentence_r_values'] = self._greedy_rouges\n",
        "\n",
        "    if self._hps.coverage:\n",
        "      to_return['coverage_loss'] = self._coverage_loss\n",
        "      if self._hps.rl_training or self._hps.ac_training:\n",
        "        to_return['reinforce_cov_total_loss']= self._reinforce_cov_total_loss\n",
        "      if self._hps.pointer_gen:\n",
        "        to_return['pointer_cov_total_loss'] = self._pointer_cov_total_loss\n",
        "    if self._hps.rl_training or self._hps.ac_training:\n",
        "      to_return['shared_loss']= self._reinforce_shared_loss\n",
        "      to_return['rl_loss']= self._rl_loss\n",
        "      to_return['rl_avg_logprobs']= self._rl_avg_logprobs\n",
        "\n",
        "    # We feed the collected reward and feed it back to model to update the loss\n",
        "    return sess.run(to_return, feed_dict)\n",
        "\n",
        "  def run_eval_step(self, sess, batch, step, q_estimates=None):\n",
        "    \"\"\" Run eval steps; same as a training step except the train op is not run,\n",
        "    so no parameters are updated.\n",
        "    Args:\n",
        "      sess: seq2seq session\n",
        "      batch: current batch\n",
        "      step: training step\n",
        "      q_estimates = if using Actor-Critic model, this variable will feed\n",
        "      the Q-estimates collected from Critic and use it to update the model\n",
        "      loss\n",
        "    Returns:\n",
        "      Dict of fetched values from sess.run (summaries, losses, ...).\n",
        "    \"\"\"\n",
        "    feed_dict = self._make_feed_dict(batch)\n",
        "    # eta mixes the RL loss with the MLE loss; fixed or linearly annealed with step.\n",
        "    if self._hps.ac_training or self._hps.rl_training:\n",
        "      if self._hps.fixed_eta:\n",
        "        feed_dict[self._eta] = self._hps.eta\n",
        "      else:\n",
        "        feed_dict[self._eta] = min(step * self._hps.eta,1.)\n",
        "    if self._hps.scheduled_sampling:\n",
        "      if self._hps.fixed_sampling_probability:\n",
        "        feed_dict[self._sampling_probability] = self._hps.sampling_probability\n",
        "      else:\n",
        "        feed_dict[self._sampling_probability] = min(step * self._hps.sampling_probability,1.) # linear decay function\n",
        "      # Cap exp(step*alpha) at float64 max so the exponential cannot overflow.\n",
        "      ranges = [np.exp(float(step) * self._hps.alpha),np.finfo(np.float64).max] # to avoid overflow\n",
        "      feed_dict[self._alpha] = np.log(ranges[np.argmin(ranges)]) # linear decay function\n",
        "    if self._hps.ac_training:\n",
        "      # Q-estimates come from the Critic (DDQN) and enter the actor's loss.\n",
        "      self.q_estimates = q_estimates\n",
        "      feed_dict[self._q_estimates]= self.q_estimates\n",
        "    to_return = {\n",
        "        'summaries': self._summaries,\n",
        "        'pgen_loss': self._pgen_loss,\n",
        "        'global_step': self.global_step,\n",
        "        'decoder_outputs': self.decoder_outputs\n",
        "    }\n",
        "\n",
        "    if self._hps.rl_training:\n",
        "      to_return['sampled_sentence_r_values'] = self._sampled_rouges\n",
        "      to_return['greedy_sentence_r_values'] = self._greedy_rouges\n",
        "\n",
        "    if self._hps.coverage:\n",
        "      to_return['coverage_loss'] = self._coverage_loss\n",
        "      if self._hps.rl_training or self._hps.ac_training:\n",
        "        to_return['reinforce_cov_total_loss']= self._reinforce_cov_total_loss\n",
        "      if self._hps.pointer_gen:\n",
        "        to_return['pointer_cov_total_loss'] = self._pointer_cov_total_loss\n",
        "    if self._hps.rl_training or self._hps.ac_training:\n",
        "      to_return['shared_loss']= self._reinforce_shared_loss\n",
        "      to_return['rl_loss']= self._rl_loss\n",
        "      to_return['rl_avg_logprobs']= self._rl_avg_logprobs\n",
        "\n",
        "    # We feed the collected reward and feed it back to model to update the loss\n",
        "    return sess.run(to_return, feed_dict)\n",
        "\n",
        "  def run_encoder(self, sess, batch):\n",
        "    \"\"\"For beam search decoding. Run the encoder on the batch and return the encoder states and decoder initial state.\n",
        "    Args:\n",
        "      sess: Tensorflow session.\n",
        "      batch: Batch object that is the same example repeated across the batch (for beam search)\n",
        "    Returns:\n",
        "      enc_states: The encoder states. A tensor of shape [batch_size, <=max_enc_steps, 2*hidden_dim].\n",
        "      dec_in_state: A LSTMStateTuple of shape ([1,hidden_dim],[1,hidden_dim])\n",
        "    \"\"\"\n",
        "    feed_dict = self._make_feed_dict(batch, just_enc=True) # feed the batch into the placeholders\n",
        "    # NOTE(review): global_step is fetched but never used below -- appears vestigial.\n",
        "    (enc_states, dec_in_state, global_step) = sess.run([self._enc_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder\n",
        "\n",
        "    # dec_in_state is LSTMStateTuple shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n",
        "    # Given that the batch is a single example repeated, dec_in_state is identical across the batch so we just take the top row.\n",
        "    dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])\n",
        "    return enc_states, dec_in_state\n",
        "\n",
        "  def decode_onestep(self, sess, batch, latest_tokens, enc_states, dec_init_states, prev_coverage, prev_decoder_outputs, prev_encoder_es):\n",
        "    \"\"\"For beam search decoding. Run the decoder for one step.\n",
        "    Args:\n",
        "      sess: Tensorflow session.\n",
        "      batch: Batch object containing single example repeated across the batch\n",
        "      latest_tokens: Tokens to be fed as input into the decoder for this timestep\n",
        "      enc_states: The encoder states.\n",
        "      dec_init_states: List of beam_size LSTMStateTuples; the decoder states from the previous timestep\n",
        "      prev_coverage: List of np arrays. The coverage vectors from the previous timestep. List of None if not using coverage.\n",
        "      prev_decoder_outputs: Decoder outputs from earlier steps; fed only when intradecoder attention is enabled.\n",
        "      prev_encoder_es: Encoder attention scores from earlier steps; fed only when temporal attention is enabled.\n",
        "    Returns:\n",
        "      ids: top 2k ids. shape [beam_size, 2*beam_size]\n",
        "      probs: top 2k log probabilities. shape [beam_size, 2*beam_size]\n",
        "      new_states: new states of the decoder. a list length beam_size containing\n",
        "        LSTMStateTuples each of shape ([hidden_dim,],[hidden_dim,])\n",
        "      attn_dists: List length beam_size containing lists length attn_length.\n",
        "      final_dists: Final output distributions for this step, one per hypothesis.\n",
        "      p_gens: Generation probabilities for this step. A list length beam_size. List of None if in baseline mode.\n",
        "      new_coverage: Coverage vectors for this step. A list of arrays. List of None if coverage is not turned on.\n",
        "      output: Decoder output for this step (intradecoder/Actor-Critic), or None.\n",
        "      temporal_e: Temporal attention scores for this step, or None.\n",
        "    \"\"\"\n",
        "\n",
        "    beam_size = len(dec_init_states)\n",
        "\n",
        "    # Turn dec_init_states (a list of LSTMStateTuples) into a single LSTMStateTuple for the batch\n",
        "    cells = [np.expand_dims(state.c, axis=0) for state in dec_init_states]\n",
        "    hiddens = [np.expand_dims(state.h, axis=0) for state in dec_init_states]\n",
        "    new_c = np.concatenate(cells, axis=0)  # shape [batch_size,hidden_dim]\n",
        "    new_h = np.concatenate(hiddens, axis=0)  # shape [batch_size,hidden_dim]\n",
        "    new_dec_in_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n",
        "\n",
        "    # One decoding step: every hypothesis is a row of the batch.\n",
        "    feed = {\n",
        "        self._enc_states: enc_states,\n",
        "        self._enc_padding_mask: batch.enc_padding_mask,\n",
        "        self._dec_in_state: new_dec_in_state,\n",
        "        self._dec_batch: np.transpose(np.array([latest_tokens])),\n",
        "        self._dec_padding_mask: np.ones((beam_size,1),dtype=np.float32)\n",
        "    }\n",
        "\n",
        "    to_return = {\n",
        "      \"ids\": self._topk_ids,\n",
        "      \"probs\": self._topk_log_probs,\n",
        "      \"states\": self._dec_out_state,\n",
        "      \"attn_dists\": self.attn_dists,\n",
        "      \"final_dists\": self.final_dists\n",
        "    }\n",
        "\n",
        "    if FLAGS.pointer_gen:\n",
        "      # Pointer-gen needs the extended-vocab encoder batch and the OOV count.\n",
        "      feed[self._enc_batch_extend_vocab] = batch.enc_batch_extend_vocab\n",
        "      feed[self._max_art_oovs] = batch.max_art_oovs\n",
        "      to_return['p_gens'] = self.p_gens\n",
        "\n",
        "    if self._hps.coverage:\n",
        "      feed[self.prev_coverage] = np.stack(prev_coverage, axis=0)\n",
        "      to_return['coverage'] = self.coverage\n",
        "\n",
        "    if FLAGS.ac_training or FLAGS.intradecoder:\n",
        "      to_return['output']=self.decoder_outputs\n",
        "    if FLAGS.intradecoder:\n",
        "      feed[self.prev_decoder_outputs]= prev_decoder_outputs\n",
        "    if FLAGS.use_temporal_attention:\n",
        "      to_return['temporal_e'] = self.temporal_es\n",
        "      feed[self.prev_encoder_es] = prev_encoder_es\n",
        "\n",
        "    results = sess.run(to_return, feed_dict=feed) # run the decoder step\n",
        "\n",
        "    # Convert results['states'] (a single LSTMStateTuple) into a list of LSTMStateTuple -- one for each hypothesis\n",
        "    new_states = [tf.contrib.rnn.LSTMStateTuple(results['states'].c[i, :], results['states'].h[i, :]) for i in range(beam_size)]\n",
        "\n",
        "    # Convert singleton list containing a tensor to a list of k arrays\n",
        "    assert len(results['attn_dists'])==1\n",
        "    attn_dists = results['attn_dists'][0].tolist()\n",
        "    final_dists = results['final_dists'][0].tolist()\n",
        "\n",
        "    if FLAGS.pointer_gen:\n",
        "      # Convert singleton list containing a tensor to a list of k arrays\n",
        "      assert len(results['p_gens'])==1\n",
        "      p_gens = results['p_gens'][0].tolist()\n",
        "    else:\n",
        "      p_gens = [None for _ in range(beam_size)]\n",
        "\n",
        "    if FLAGS.ac_training or FLAGS.intradecoder:\n",
        "      output = results['output'][0] # used for calculating the intradecoder at later steps and for calculating q-estimate in Actor-Critic training.\n",
        "    else:\n",
        "      output = None\n",
        "    if FLAGS.use_temporal_attention:\n",
        "      temporal_e = results['temporal_e'][0] # used for calculating the attention at later steps\n",
        "    else:\n",
        "      temporal_e = None\n",
        "\n",
        "    # Convert the coverage tensor to a list length k containing the coverage vector for each hypothesis\n",
        "    if FLAGS.coverage:\n",
        "      new_coverage = results['coverage'].tolist()\n",
        "      assert len(new_coverage) == beam_size\n",
        "    else:\n",
        "      new_coverage = [None for _ in range(beam_size)]\n",
        "\n",
        "    return results['ids'], results['probs'], new_states, attn_dists, final_dists, p_gens, new_coverage, output, temporal_e\n",
        "\n",
        "def _mask_and_avg(values, padding_mask):\n",
        "  \"\"\"Applies mask to values then returns overall average (a scalar)\n",
        "  Args:\n",
        "    values: a list length max_dec_steps containing arrays shape (batch_size).\n",
        "    padding_mask: tensor shape (batch_size, max_dec_steps) containing 1s and 0s.\n",
        "  Returns:\n",
        "    a scalar\n",
        "  \"\"\"\n",
        "\n",
        "  # Number of non-pad tokens per example, used to normalize each sequence.\n",
        "  dec_lens = tf.reduce_sum(padding_mask, axis=1) # shape (batch_size), float32\n",
        "  masked_steps = []\n",
        "  for dec_step, step_values in enumerate(values):\n",
        "    masked_steps.append(step_values * padding_mask[:,dec_step])\n",
        "  # Per-example sum over steps, normalized by that example's true length.\n",
        "  per_example_avg = sum(masked_steps)/dec_lens # shape (batch_size)\n",
        "  return tf.reduce_mean(per_example_avg) # overall average\n",
        "\n",
        "def _coverage_loss(attn_dists, padding_mask):\n",
        "  \"\"\"Calculates the coverage loss from the attention distributions.\n",
        "  Args:\n",
        "    attn_dists: The attention distributions for each decoder timestep. A list length max_dec_steps containing shape (batch_size, attn_length)\n",
        "    padding_mask: shape (batch_size, max_dec_steps).\n",
        "  Returns:\n",
        "    coverage_loss: scalar\n",
        "  \"\"\"\n",
        "  # Running sum of attention over previous steps; zero before the first step.\n",
        "  coverage_so_far = tf.zeros_like(attn_dists[0]) # shape (batch_size, attn_length)\n",
        "  step_losses = [] # per-step losses; list length max_dec_steps of shape (batch_size)\n",
        "  for attn_dist in attn_dists:\n",
        "    # Penalize attention mass that re-attends to already-covered source positions.\n",
        "    step_losses.append(tf.reduce_sum(tf.minimum(attn_dist, coverage_so_far), [1]))\n",
        "    coverage_so_far += attn_dist # update the coverage vector\n",
        "  return _mask_and_avg(step_losses, padding_mask)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "f5lP9ZHFMLlD",
        "colab_type": "text"
      },
      "source": [
        "### Replay Buffer"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XfIt9CWWMN1n",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "try:\n",
        "  import queue\n",
        "except:\n",
        "  import Queue as queue\n",
        "from random import shuffle\n",
        "from random import seed\n",
        "seed(123)\n",
        "from threading import Thread\n",
        "import numpy as np\n",
        "import time\n",
        "import tensorflow as tf\n",
        "try:\n",
        "  import Queue as Q  # ver. < 3.0\n",
        "except ImportError:\n",
        "  import queue as Q\n",
        "from sklearn.preprocessing import normalize\n",
        "\n",
        "PriorityQueue = Q.PriorityQueue\n",
        "\n",
        "\n",
        "# Custom priority queue that is able to clear the queue once it is full\n",
        "# and cut it in half. Therefore, every time the buffer size reaches dqn_replay_buffer_size,\n",
        "# we will keep half of the most valuable states and remove the rest to provide\n",
        "# space for new experiences\n",
        "class CustomQueue(PriorityQueue):\n",
        "  '''\n",
        "  A priority queue subclass that provides a :meth:`clear` method.\n",
        "  When full, :meth:`clear` keeps only the higher-priority half of the items\n",
        "  to make room for new experiences.\n",
        "  '''\n",
        "  def __init__(self, size):\n",
        "    PriorityQueue.__init__(self, size)\n",
        "\n",
        "  def clear(self):\n",
        "    '''\n",
        "    Drops the lower-priority half of the queued items.\n",
        "    '''\n",
        "    with self.mutex:\n",
        "      unfinished = self.unfinished_tasks - len(self.queue)\n",
        "      if unfinished <= 0:\n",
        "        if unfinished < 0:\n",
        "          raise ValueError('task_done() called too many times')\n",
        "        self.all_tasks_done.notify_all()\n",
        "      # Integer (floor) division: in Python 3, '/' returns a float and a\n",
        "      # float slice index raises TypeError. '//' keeps this Python-3 safe.\n",
        "      self.queue = self.queue[0:len(self.queue)//2]\n",
        "      self.unfinished_tasks = unfinished + len(self.queue)\n",
        "      self.not_full.notify_all()\n",
        "\n",
        "  def isempty(self):\n",
        "    # Thread-safe emptiness check (qsize() is only approximate).\n",
        "    with self.mutex:\n",
        "      return len(self.queue) == 0\n",
        "\n",
        "  def isfull(self):\n",
        "    # Thread-safe fullness check against the configured maxsize.\n",
        "    with self.mutex:\n",
        "      return len(self.queue) == self.maxsize\n",
        "\n",
        "class Transition(object):\n",
        "  \"\"\"\n",
        "  A class for holding the experiences collected from seq2seq model\n",
        "  \"\"\"\n",
        "  def __init__(self, state, action, state_prime, action_prime, reward, q_value, done):\n",
        "    \"\"\"\n",
        "      Args:\n",
        "        state: current decoder output state\n",
        "        action: the greedy action selected from current decoder output\n",
        "        state_prime: next decoder output state\n",
        "        action_prime: the greedy action selected from the next decoder output\n",
        "        reward: reward of the greedy action selected\n",
        "        q_value: Q-value of the greedy action selected\n",
        "        done: whether we reached End-Of-Sequence or not\n",
        "    \"\"\"\n",
        "    self.state = state # size: dqn_input_feature_len\n",
        "    self.action = action # size: 1\n",
        "    self.state_prime = state_prime # size: dqn_input_feature_len\n",
        "    self.action_prime = action_prime\n",
        "    self.reward = reward # size: vocab_size\n",
        "    self.q_value = q_value # size: vocab_size\n",
        "    self.done = done # true/false\n",
        "\n",
        "  def __lt__(self, item):\n",
        "    \"\"\" PriorityQueue uses this function to sort the rewards.\n",
        "    Python 3 heaps order items with '<' (the old __cmp__/cmp protocol was\n",
        "    removed), so the comparison is inverted: a Transition with a higher\n",
        "    reward compares as 'smaller' and therefore sits at the head of the\n",
        "    min-heap, giving max-heap behavior over rewards.\n",
        "    \"\"\"\n",
        "    return item.reward < self.reward # bigger numbers have more priority\n",
        "\n",
        "class ReplayBatch(object):\n",
        "  \"\"\" A class for creating batches required for training DDQN. \"\"\"\n",
        "\n",
        "  def __init__(self, hps, example_list, dqn_batch_size, use_state_prime = False, max_art_oovs = 0):\n",
        "    \"\"\"\n",
        "      Args:\n",
        "       hps: seq2seq model parameters\n",
        "       example_list: list of experiences\n",
        "       dqn_batch_size: DDQN batch size\n",
        "       use_state_prime: whether to use the next decoder state to make the batch or the current one\n",
        "       max_art_oovs: number of OOV tokens in current batch\n",
        "      Properties:\n",
        "        _x: The input to DDQN model for training, this is basically the decoder output (dqn_batch_size, dqn_input_feature_len)\n",
        "        _y: The Q-estimation (dqn_batch_size, vocab_size)\n",
        "        _y_extended: The Q-estimation (dqn_batch_size, vocab_size + max_art_oovs)\n",
        "    \"\"\"\n",
        "    self._x = np.zeros((dqn_batch_size, hps.dqn_input_feature_len))\n",
        "    self._y = np.zeros((dqn_batch_size, hps.vocab_size))\n",
        "    self._y_extended = np.zeros((dqn_batch_size, hps.vocab_size + max_art_oovs))\n",
        "    for i,e in enumerate(example_list):\n",
        "      if use_state_prime:\n",
        "        self._x[i,:]=e.state_prime\n",
        "      else:\n",
        "        self._x[i,:]=e.state\n",
        "        # NOTE(review): _y is only populated in this else-branch, so it stays\n",
        "        # all-zero when use_state_prime=True -- confirm that is intended.\n",
        "        # L1-normalize so each target row forms a probability distribution.\n",
        "        self._y[i,:]=normalize([e.q_value[0:hps.vocab_size]], axis=1, norm='l1')\n",
        "      if max_art_oovs == 0:\n",
        "        # No OOVs in this batch: extended targets are just the normalized vocab slice.\n",
        "        self._y_extended[i,:] = normalize([e.q_value[0:hps.vocab_size]], axis=1, norm='l1')\n",
        "      else:\n",
        "        self._y_extended[i,:] = e.q_value\n",
        "\n",
        "class ReplayBuffer(object):\n",
        "  \"\"\" A class for implementing the priority experience buffer.\n",
        "\n",
        "  Background threads move Transitions from the priority buffer into an\n",
        "  example queue, batch them into ReplayBatch objects, and keep the threads\n",
        "  alive via a watcher.\n",
        "  \"\"\"\n",
        "\n",
        "  BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold\n",
        "\n",
        "  def __init__(self, hps):\n",
        "    self._hps = hps\n",
        "    self._buffer = CustomQueue(self._hps.dqn_replay_buffer_size)\n",
        "\n",
        "    self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)\n",
        "    self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self._hps.dqn_batch_size)\n",
        "    self._num_example_q_threads = 1 # num threads to fill example queue\n",
        "    self._num_batch_q_threads = 1  # num threads to fill batch queue\n",
        "    self._bucketing_cache_size = 100 # how many batches-worth of examples to load into cache before bucketing\n",
        "\n",
        "    # Start the threads that load the queues\n",
        "    self._example_q_threads = []\n",
        "    for _ in range(self._num_example_q_threads):\n",
        "      self._example_q_threads.append(Thread(target=self.fill_example_queue))\n",
        "      self._example_q_threads[-1].daemon = True\n",
        "      self._example_q_threads[-1].start()\n",
        "    self._batch_q_threads = []\n",
        "    for _ in range(self._num_batch_q_threads):\n",
        "      self._batch_q_threads.append(Thread(target=self.fill_batch_queue))\n",
        "      self._batch_q_threads[-1].daemon = True\n",
        "      self._batch_q_threads[-1].start()\n",
        "\n",
        "    # Start a thread that watches the other threads and restarts them if they're dead\n",
        "    self._watch_thread = Thread(target=self.watch_threads)\n",
        "    self._watch_thread.daemon = True\n",
        "    self._watch_thread.start()\n",
        "\n",
        "  def next_batch(self):\n",
        "    \"\"\"Return a ReplayBatch from the batch queue, or None if the queue is empty.\n",
        "    Returns:\n",
        "      batch: a ReplayBatch object, or None when no batch is ready yet.\n",
        "    \"\"\"\n",
        "    # If the batch queue is empty, print a warning and bail out rather than block.\n",
        "    if self._batch_queue.qsize() == 0:\n",
        "      tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())\n",
        "      return None\n",
        "\n",
        "    batch = self._batch_queue.get() # get the next Batch\n",
        "    return batch\n",
        "\n",
        "  @staticmethod\n",
        "  def create_batch(_hps, batch, batch_size, use_state_prime=False, max_art_oovs=0):\n",
        "    \"\"\" Create a DDQN-compatible batch from the input transitions\n",
        "      Args:\n",
        "        _hps: seq2seq model parameters\n",
        "        batch: a list of Transitions\n",
        "        batch_size: DDQN batch size\n",
        "        use_state_prime: whether to use the next decoder state to make the batch or the current one\n",
        "        max_art_oovs: number of OOV tokens in current batch\n",
        "      Returns:\n",
        "        An object of ReplayBatch class\n",
        "    \"\"\"\n",
        "\n",
        "    return ReplayBatch(_hps, batch, batch_size, use_state_prime, max_art_oovs)\n",
        "\n",
        "  def fill_example_queue(self):\n",
        "    \"\"\"Reads transitions from the buffer and places them into the example queue.\"\"\"\n",
        "    # Create the generator ONCE. The original called\n",
        "    # self._example_generator().next() inside the loop, which (a) built a\n",
        "    # fresh generator on every iteration and (b) used the Python-2-only\n",
        "    # .next() method, which raises AttributeError under Python 3.\n",
        "    input_gen = self._example_generator()\n",
        "    while True:\n",
        "      try:\n",
        "        example = next(input_gen)\n",
        "      except StopIteration: # if there are no more examples:\n",
        "        tf.logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\n",
        "        raise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\n",
        "      self._example_queue.put(example) # place the pair in the example queue.\n",
        "\n",
        "  def fill_batch_queue(self):\n",
        "    \"\"\"Takes Examples out of example queue, processes them into Batches and places them in the batch queue.\"\"\"\n",
        "    while True:\n",
        "      # Get bucketing_cache_size-many batches of Examples into a list\n",
        "      inputs = []\n",
        "      for _ in range(self._hps.dqn_batch_size * self._bucketing_cache_size):\n",
        "        inputs.append(self._example_queue.get())\n",
        "\n",
        "      # feed back all the samples to the buffer\n",
        "      self.add(inputs)\n",
        "\n",
        "      # Group the Examples into batches, shuffle the batches, and place in the batch queue.\n",
        "      batches = []\n",
        "      for i in range(0, len(inputs), self._hps.dqn_batch_size):\n",
        "        batches.append(inputs[i:i + self._hps.dqn_batch_size])\n",
        "      # Shuffle once after grouping. The original shuffled inside the loop,\n",
        "      # re-shuffling the partial list on every iteration for no benefit.\n",
        "      shuffle(batches)\n",
        "      for b in batches:  # each b is a list of Example objects\n",
        "        self._batch_queue.put(ReplayBatch(self._hps, b, self._hps.dqn_batch_size))\n",
        "\n",
        "  def watch_threads(self):\n",
        "    \"\"\"Watch example queue and batch queue threads and restart if dead.\"\"\"\n",
        "    while True:\n",
        "      time.sleep(60)\n",
        "      for idx,t in enumerate(self._example_q_threads):\n",
        "        if not t.is_alive(): # if the thread is dead\n",
        "          tf.logging.error('Found example queue thread dead. Restarting.')\n",
        "          new_t = Thread(target=self.fill_example_queue)\n",
        "          self._example_q_threads[idx] = new_t\n",
        "          new_t.daemon = True\n",
        "          new_t.start()\n",
        "      for idx,t in enumerate(self._batch_q_threads):\n",
        "        if not t.is_alive(): # if the thread is dead\n",
        "          tf.logging.error('Found batch queue thread dead. Restarting.')\n",
        "          new_t = Thread(target=self.fill_batch_queue)\n",
        "          self._batch_q_threads[idx] = new_t\n",
        "          new_t.daemon = True\n",
        "          new_t.start()\n",
        "\n",
        "  def add(self, items):\n",
        "    \"\"\" Adding a list of experiences to the buffer. When buffer is full,\n",
        "      we get rid of half of the least important experiences and keep the rest.\n",
        "      Args:\n",
        "        items: A list of experiences of size (batch_size, k, max_dec_steps, hidden_dim)\n",
        "    \"\"\"\n",
        "    for item in items:\n",
        "      if not self._buffer.isfull():\n",
        "        self._buffer.put_nowait(item)\n",
        "      else:\n",
        "        print('Replay Buffer is full, getting rid of unimportant transitions...')\n",
        "        self._buffer.clear()\n",
        "        self._buffer.put_nowait(item)\n",
        "    print('ReplayBatch size: {}'.format(self._buffer.qsize()))\n",
        "    print('ReplayBatch example queue size: {}'.format(self._example_queue.qsize()))\n",
        "    print('ReplayBatch batch queue size: {}'.format(self._batch_queue.qsize()))\n",
        "\n",
        "  def _buffer_len(self):\n",
        "    # Approximate number of items currently in the priority buffer.\n",
        "    return self._buffer.qsize()\n",
        "\n",
        "  def _example_generator(self):\n",
        "    # Yields transitions from the priority buffer as they become available.\n",
        "    # NOTE(review): this busy-waits while the buffer is empty; acceptable in\n",
        "    # a daemon thread here, but a small sleep would reduce CPU spin.\n",
        "    while True:\n",
        "      if not self._buffer.isempty():\n",
        "        item = self._buffer.get_nowait()\n",
        "        self._buffer.task_done()\n",
        "        yield item"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Vl42_ExqL17l",
        "colab_type": "text"
      },
      "source": [
        "### Beam Search"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "JMIXJiEIL3-l",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This file contains code to run beam search decoding\"\"\"\n",
        "\n",
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "#import data\n",
        "#from replay_buffer import Transition, ReplayBuffer\n",
        "from collections import Counter\n",
        "from sklearn.preprocessing import normalize\n",
        "\n",
        "FLAGS = tf.app.flags.FLAGS\n",
        "\n",
        "class Hypothesis(object):\n",
        "  \"\"\"Class to represent a hypothesis during beam search. Holds all the information needed for the hypothesis.\"\"\"\n",
        "\n",
        "  def __init__(self, tokens, log_probs, state, decoder_output, encoder_mask, attn_dists, p_gens, coverage):\n",
        "    \"\"\"Hypothesis constructor.\n",
        "    Args:\n",
        "      tokens: List of integers. The ids of the tokens that form the summary so far.\n",
        "      log_probs: List, same length as tokens, of floats, giving the log probabilities of the tokens so far.\n",
        "      state: Current state of the decoder, a LSTMStateTuple.\n",
        "      decoder_output: List of decoder output vectors so far (used for intradecoder attention).\n",
        "      encoder_mask: List of encoder attention vectors so far (used for temporal attention).\n",
        "      attn_dists: List, same length as tokens, of numpy arrays with shape (attn_length). These are the attention distributions so far.\n",
        "      p_gens: List, same length as tokens, of floats, or None if not using pointer-generator model. The values of the generation probability so far.\n",
        "      coverage: Numpy array of shape (attn_length), or None if not using coverage. The current coverage vector.\n",
        "    \"\"\"\n",
        "    self.tokens = tokens\n",
        "    self.log_probs = log_probs\n",
        "    self.state = state\n",
        "    self.decoder_output = decoder_output\n",
        "    self.encoder_mask = encoder_mask\n",
        "    self.attn_dists = attn_dists\n",
        "    self.p_gens = p_gens\n",
        "    self.coverage = coverage\n",
        "\n",
        "  def extend(self, token, log_prob, state, decoder_output, encoder_mask, attn_dist, p_gen, coverage):\n",
        "    \"\"\"Return a NEW hypothesis, extended with the information from the latest step of beam search.\n",
        "    Args:\n",
        "      token: Integer. Latest token produced by beam search.\n",
        "      log_prob: Float. Log prob of the latest token.\n",
        "      state: Current decoder state, a LSTMStateTuple.\n",
        "      decoder_output: Latest decoder output vector, or None when intradecoder attention is off.\n",
        "      encoder_mask: Latest encoder attention vector, or None when temporal attention is off.\n",
        "      attn_dist: Attention distribution from latest step. Numpy array shape (attn_length).\n",
        "      p_gen: Generation probability on latest step. Float.\n",
        "      coverage: Latest coverage vector. Numpy array shape (attn_length), or None if not using coverage.\n",
        "    Returns:\n",
        "      New Hypothesis for next step.\n",
        "    \"\"\"\n",
        "    if FLAGS.avoid_trigrams and self._has_trigram(self.tokens + [token]):\n",
        "        # Kill extensions that would repeat a trigram by giving them -inf log prob.\n",
        "        # np.inf replaces np.infty, which was removed in NumPy 2.0.\n",
        "        log_prob = -np.inf\n",
        "    return Hypothesis(tokens = self.tokens + [token],\n",
        "                      log_probs = self.log_probs + [log_prob],\n",
        "                      state = state,\n",
        "                      decoder_output= self.decoder_output + [decoder_output] if decoder_output is not None else [],\n",
        "                      encoder_mask = self.encoder_mask + [encoder_mask] if encoder_mask is not None else [],\n",
        "                      attn_dists = self.attn_dists + [attn_dist],\n",
        "                      p_gens = self.p_gens + [p_gen],\n",
        "                      coverage = coverage)\n",
        "\n",
        "  def _find_ngrams(self, input_list, n):\n",
        "      # Lazily yields every n-gram of input_list as a tuple.\n",
        "      return zip(*[input_list[i:] for i in range(n)])\n",
        "\n",
        "  def _has_trigram(self, tokens):\n",
        "      # True if any trigram occurs more than once in tokens.\n",
        "      tri_grams = self._find_ngrams(tokens, 3)\n",
        "      cnt = Counter(tri_grams)\n",
        "      return not all((cnt[g] == 1 for g in cnt))\n",
        "\n",
        "  @property\n",
        "  def latest_token(self):\n",
        "    return self.tokens[-1]\n",
        "\n",
        "  @property\n",
        "  def log_prob(self):\n",
        "    # the log probability of the hypothesis so far is the sum of the log probabilities of the tokens so far\n",
        "    return sum(self.log_probs)\n",
        "\n",
        "  @property\n",
        "  def avg_log_prob(self):\n",
        "    # normalize log probability by number of tokens (otherwise longer sequences always have lower probability)\n",
        "    return self.log_prob / len(self.tokens)\n",
        "\n",
        "\n",
        "def run_beam_search(sess, model, vocab, batch, dqn = None, dqn_sess = None, dqn_graph = None):\n",
        "  \"\"\"Performs beam search decoding on the given example.\n",
        "  Args:\n",
        "    sess: a tf.Session\n",
        "    model: a seq2seq model\n",
        "    vocab: Vocabulary object\n",
        "    batch: Batch object that is the same example repeated across the batch\n",
        "    dqn: optional DDQN critic model (only used when FLAGS.ac_training is set)\n",
        "    dqn_sess: optional tf.Session for the DDQN graph\n",
        "    dqn_graph: optional tf.Graph holding the DDQN model\n",
        "  Returns:\n",
        "    best_hyp: Hypothesis object; the best hypothesis found by beam search.\n",
        "  \"\"\"\n",
        "  # Run the encoder to get the encoder hidden states and decoder initial state\n",
        "  enc_states, dec_in_state = model.run_encoder(sess, batch)\n",
        "  # dec_in_state is a LSTMStateTuple\n",
        "  # enc_states has shape [batch_size, <=max_enc_steps, 2*hidden_dim].\n",
        "\n",
        "  # Initialize beam_size-many hyptheses\n",
        "  # NOTE(review): START_DECODING/STOP_DECODING/UNKNOWN_TOKEN are assumed to be\n",
        "  # defined in the data-helper cell of this notebook -- confirm before running.\n",
        "  hyps = [Hypothesis(tokens=[vocab.word2id(START_DECODING)],\n",
        "                     log_probs=[0.0],\n",
        "                     state=dec_in_state,\n",
        "                     decoder_output = [np.zeros([FLAGS.dec_hidden_dim])],\n",
        "                     encoder_mask = [np.zeros([batch.enc_batch.shape[1]])],\n",
        "                     attn_dists=[],\n",
        "                     p_gens=[],\n",
        "                     coverage=np.zeros([batch.enc_batch.shape[1]]) # zero vector of length attention_length\n",
        "                     ) for _ in range(FLAGS.beam_size)]\n",
        "  results = [] # this will contain finished hypotheses (those that have emitted the [STOP] token)\n",
        "\n",
        "  steps = 0\n",
        "  while steps < FLAGS.max_dec_steps and len(results) < FLAGS.beam_size:\n",
        "    latest_tokens = [h.latest_token for h in hyps] # latest token produced by each hypothesis\n",
        "    latest_tokens = [t if t in range(vocab.size()) else vocab.word2id(UNKNOWN_TOKEN) for t in latest_tokens] # change any in-article temporary OOV ids to [UNK] id, so that we can lookup word embeddings\n",
        "    states = [h.state for h in hyps] # list of current decoder states of the hypotheses\n",
        "    prev_coverage = [h.coverage for h in hyps] # list of coverage vectors (or None)\n",
        "    decoder_outputs = np.array([h.decoder_output for h in hyps]).swapaxes(0, 1) # shape (?, batch_size, dec_hidden_dim)\n",
        "    encoder_es = np.array([h.encoder_mask for h in hyps]).swapaxes(0, 1)  # shape (?, batch_size, enc_hidden_dim)\n",
        "    # Run one step of the decoder to get the new info\n",
        "    (topk_ids, topk_log_probs, new_states, attn_dists, final_dists, p_gens, new_coverage, decoder_output, encoder_e) = model.decode_onestep(sess=sess,\n",
        "                        batch=batch,\n",
        "                        latest_tokens=latest_tokens,\n",
        "                        enc_states=enc_states,\n",
        "                        dec_init_states=states,\n",
        "                        prev_coverage=prev_coverage,\n",
        "                        prev_decoder_outputs= decoder_outputs if FLAGS.intradecoder else tf.stack([], axis=0),\n",
        "                        prev_encoder_es = encoder_es if FLAGS.use_temporal_attention else tf.stack([], axis=0))\n",
        "\n",
        "    if FLAGS.ac_training:\n",
        "      # Actor-critic mode: re-rank the seq2seq distribution with the critic's Q-estimates.\n",
        "      with dqn_graph.as_default():\n",
        "        dqn_results = dqn.run_test_steps(dqn_sess, x=decoder_output)\n",
        "        q_estimates = dqn_results['estimates'] # shape (len(transitions), vocab_size)\n",
        "        # we use the q_estimate of UNK token for all the OOV tokens\n",
        "        q_estimates = np.concatenate([q_estimates,np.reshape(q_estimates[:,0],[-1,1])*np.ones((FLAGS.beam_size,batch.max_art_oovs))],axis=-1)\n",
        "        # normalized q_estimate\n",
        "        q_estimates = normalize(q_estimates, axis=1, norm='l1')\n",
        "        combined_estimates = final_dists * q_estimates\n",
        "        combined_estimates = normalize(combined_estimates, axis=1, norm='l1')\n",
        "        # overwriting topk ids and probs\n",
        "        topk_ids = np.argsort(combined_estimates,axis=-1)[:,-FLAGS.beam_size*2:][:,::-1]\n",
        "        # Fancy indexing: for each row i, gather the probabilities of that row's top-k ids.\n",
        "        topk_probs = [combined_estimates[i,_] for i,_ in enumerate(topk_ids)]\n",
        "        topk_log_probs = np.log(topk_probs)\n",
        "\n",
        "    # Extend each hypothesis and collect them all in all_hyps\n",
        "    all_hyps = []\n",
        "    num_orig_hyps = 1 if steps == 0 else len(hyps) # On the first step, we only had one original hypothesis (the initial hypothesis). On subsequent steps, all original hypotheses are distinct.\n",
        "    for i in range(num_orig_hyps):\n",
        "      h, new_state, attn_dist, p_gen, new_coverage_i = hyps[i], new_states[i], attn_dists[i], p_gens[i], new_coverage[i]  # take the ith hypothesis and new decoder state info\n",
        "      decoder_output_i = None\n",
        "      encoder_mask_i = None\n",
        "      if FLAGS.intradecoder:\n",
        "        decoder_output_i = decoder_output[i]\n",
        "      if FLAGS.use_temporal_attention:\n",
        "        encoder_mask_i = encoder_e[i]\n",
        "      for j in range(FLAGS.beam_size * 2):  # for each of the top 2*beam_size hyps:\n",
        "        # Extend the ith hypothesis with the jth option\n",
        "        new_hyp = h.extend(token=topk_ids[i, j],\n",
        "                           log_prob=topk_log_probs[i, j],\n",
        "                           state=new_state,\n",
        "                           decoder_output = decoder_output_i,\n",
        "                           encoder_mask = encoder_mask_i,\n",
        "                           attn_dist=attn_dist,\n",
        "                           p_gen=p_gen,\n",
        "                           coverage=new_coverage_i)\n",
        "        all_hyps.append(new_hyp)\n",
        "\n",
        "    # Filter and collect any hypotheses that have produced the end token.\n",
        "    hyps = [] # will contain hypotheses for the next step\n",
        "    for h in sort_hyps(all_hyps): # in order of most likely h\n",
        "      if h.latest_token == vocab.word2id(STOP_DECODING): # if stop token is reached...\n",
        "        # If this hypothesis is sufficiently long, put in results. Otherwise discard.\n",
        "        if steps >= FLAGS.min_dec_steps:\n",
        "          results.append(h)\n",
        "      else: # hasn't reached stop token, so continue to extend this hypothesis\n",
        "        hyps.append(h)\n",
        "      if len(hyps) == FLAGS.beam_size or len(results) == FLAGS.beam_size:\n",
        "        # Once we've collected beam_size-many hypotheses for the next step, or beam_size-many complete hypotheses, stop.\n",
        "        break\n",
        "\n",
        "    steps += 1\n",
        "\n",
        "  # At this point, either we've got beam_size results, or we've reached maximum decoder steps\n",
        "\n",
        "  if len(results)==0: # if we don't have any complete results, add all current hypotheses (incomplete summaries) to results\n",
        "    results = hyps\n",
        "\n",
        "  # Sort hypotheses by average log probability\n",
        "  hyps_sorted = sort_hyps(results)\n",
        "\n",
        "  # Return the hypothesis with highest average log prob\n",
        "  return hyps_sorted[0]\n",
        "\n",
        "def sort_hyps(hyps):\n",
        "  \"\"\"Return the given Hypothesis objects ordered from highest to lowest average log probability.\"\"\"\n",
        "  ordered = sorted(hyps, key=lambda hyp: -hyp.avg_log_prob)\n",
        "  return ordered"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HArp8MJDMePU",
        "colab_type": "text"
      },
      "source": [
        "### Util"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "CDHyrNwaMfVK",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This file contains some utility functions\"\"\"\n",
        "\n",
        "import tensorflow as tf\n",
        "import time\n",
        "import os\n",
        "FLAGS = tf.app.flags.FLAGS\n",
        "\n",
        "def get_config():\n",
        "  \"\"\"Builds the tf.Session configuration: soft device placement with\n",
        "  on-demand GPU memory growth.\"\"\"\n",
        "  session_config = tf.ConfigProto(allow_soft_placement=True)\n",
        "  # Grab GPU memory as needed instead of reserving it all up front.\n",
        "  session_config.gpu_options.allow_growth = True\n",
        "  return session_config\n",
        "\n",
        "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n",
        "  \"\"\"Load checkpoint from the ckpt_dir (if unspecified, this is train dir) and restore it to saver and sess, waiting 10 secs in the case of failure. Also returns checkpoint name.\"\"\"\n",
        "  latest_filename = \"checkpoint_best\" if ckpt_dir==\"eval\" else None\n",
        "  # Resolve the full path ONCE. The original joined inside the retry loop,\n",
        "  # so every failed attempt re-prefixed log_root onto the already-joined\n",
        "  # path (log_root/log_root/train/...), making retries always fail.\n",
        "  ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n",
        "  while True:\n",
        "    try:\n",
        "      ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n",
        "      tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n",
        "      saver.restore(sess, ckpt_state.model_checkpoint_path)\n",
        "      return ckpt_state.model_checkpoint_path\n",
        "    except Exception: # retry on any load error, but let KeyboardInterrupt/SystemExit propagate\n",
        "      tf.logging.info(\"Failed to load checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n",
        "      time.sleep(10)\n",
        "\n",
        "def load_dqn_ckpt(saver, sess):\n",
        "  \"\"\"Load the DDQN checkpoint from log_root/dqn/train and restore it to saver and sess, waiting 10 secs in the case of failure. Also returns checkpoint name.\"\"\"\n",
        "  # The path is constant, so compute it once outside the retry loop.\n",
        "  ckpt_dir = os.path.join(FLAGS.log_root, \"dqn\", \"train\")\n",
        "  while True:\n",
        "    try:\n",
        "      ckpt_state = tf.train.get_checkpoint_state(ckpt_dir)\n",
        "      tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n",
        "      saver.restore(sess, ckpt_state.model_checkpoint_path)\n",
        "      return ckpt_state.model_checkpoint_path\n",
        "    except Exception: # retry on any load error, but let KeyboardInterrupt propagate\n",
        "      tf.logging.info(\"Failed to load checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n",
        "      time.sleep(10)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HyX9o0teI4fb",
        "colab_type": "text"
      },
      "source": [
        "### Decode"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "y8QzCLjuI6_j",
        "colab_type": "text"
      },
      "source": [
        "https://github.com/yaserkl/RLSeq2Seq/blob/master/src/decode.py"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "F-vKdSWVJC2b",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This file contains code to run beam search decoding, including running ROUGE evaluation and producing JSON datafiles for the in-browser attention visualizer, which can be found here https://github.com/abisee/attn_vis\"\"\"\n",
        "\n",
        "import os\n",
        "import time\n",
        "import tensorflow as tf\n",
        "#import beam_search\n",
        "#import data\n",
        "import json\n",
        "import pyrouge\n",
        "#import util\n",
        "import logging\n",
        "#from unidecode import unidecode\n",
        "\n",
        "FLAGS = tf.app.flags.FLAGS\n",
        "\n",
        "SECS_UNTIL_NEW_CKPT = 60  # max number of seconds before loading new checkpoint\n",
        "\n",
        "article = []\n",
        "reference = []\n",
        "summary = []\n",
        "\n",
        "class BeamSearchDecoder(object):\n",
        "  \"\"\"Beam search decoder.\"\"\"\n",
        "\n",
        "  def __init__(self, model, batcher, vocab, dqn = None):\n",
        "    \"\"\"Initialize decoder.\n",
        "    Args:\n",
        "      model: a Seq2SeqAttentionModel object.\n",
        "      batcher: a Batcher object.\n",
        "      vocab: Vocabulary object\n",
        "    \"\"\"\n",
        "    self._model = model\n",
        "    self._model.build_graph()\n",
        "    self._batcher = batcher\n",
        "    self._vocab = vocab\n",
        "    self._saver = tf.train.Saver() # we use this to load checkpoints for decoding\n",
        "    self._sess = tf.Session(config=get_config())\n",
        "\n",
        "    if FLAGS.ac_training:\n",
        "      self._dqn = dqn\n",
        "      self._dqn_graph = tf.Graph()\n",
        "      with self._dqn_graph.as_default():\n",
        "        self._dqn.build_graph()\n",
        "        self._dqn_saver = tf.train.Saver() # we use this to load checkpoints for decoding\n",
        "        self._dqn_sess = tf.Session(config=get_config())\n",
        "        _ = load_dqn_ckpt(self._dqn_saver, self._dqn_sess)\n",
        "\n",
        "    # Load an initial checkpoint to use for decoding\n",
        "    ckpt_path = load_ckpt(self._saver, self._sess, FLAGS.decode_from)\n",
        "\n",
        "    if FLAGS.single_pass:\n",
        "      # Make a descriptive decode directory name\n",
        "      ckpt_name = \"{}-ckpt-\".format(FLAGS.decode_from) + ckpt_path.split('-')[\n",
        "        -1]  # this is something of the form \"ckpt-123456\"\n",
        "      self._decode_dir = os.path.join(FLAGS.log_root, get_decode_dir_name(ckpt_name))\n",
        "    else: # Generic decode dir name\n",
        "      self._decode_dir = os.path.join(FLAGS.log_root, \"decode\")\n",
        "\n",
        "    # Make the decode dir if necessary\n",
        "    if not os.path.exists(self._decode_dir): os.mkdir(self._decode_dir)\n",
        "\n",
        "    if FLAGS.single_pass:\n",
        "      # Make the dirs to contain output written in the correct format for pyrouge\n",
        "      self._rouge_ref_dir = os.path.join(self._decode_dir, \"reference\")\n",
        "      if not os.path.exists(self._rouge_ref_dir): os.mkdir(self._rouge_ref_dir)\n",
        "      self._rouge_dec_dir = os.path.join(self._decode_dir, \"decoded\")\n",
        "      if not os.path.exists(self._rouge_dec_dir): os.mkdir(self._rouge_dec_dir)\n",
        "\n",
        "  def decode(self):\n",
        "    \"\"\"Decode examples until data is exhausted (if FLAGS.single_pass) and return, or decode indefinitely, loading latest checkpoint at regular intervals\"\"\"\n",
        "    t0 = time.time()\n",
        "    counter = FLAGS.decode_after\n",
        "    while True:\n",
        "      tf.reset_default_graph()\n",
        "      batch = self._batcher.next_batch()  # 1 example repeated across batch\n",
        "      if batch is None: # finished decoding dataset in single_pass mode\n",
        "        assert FLAGS.single_pass, \"Dataset exhausted, but we are not in single_pass mode\"\n",
        "        tf.logging.info(\"Decoder has finished reading dataset for single_pass.\")\n",
        "        tf.logging.info(\"Output has been saved in %s and %s. Now starting ROUGE eval...\", self._rouge_ref_dir, self._rouge_dec_dir)\n",
        "        results_dict = rouge_eval(self._rouge_ref_dir, self._rouge_dec_dir)\n",
        "        rouge_log(results_dict, self._decode_dir)\n",
        "        return\n",
        "\n",
        "      original_article = batch.original_articles[0]  # string\n",
        "      original_abstract = batch.original_abstracts[0]  # string\n",
        "      original_abstract_sents = batch.original_abstracts_sents[0]  # list of strings\n",
        "\n",
        "      article_withunks = show_art_oovs(original_article, self._vocab) # string\n",
        "      abstract_withunks = show_abs_oovs(original_abstract, self._vocab, (batch.art_oovs[0] if FLAGS.pointer_gen else None)) # string\n",
        "\n",
        "      # Run beam search to get best Hypothesis\n",
        "      if FLAGS.ac_training:\n",
        "        best_hyp = run_beam_search(self._sess, self._model, self._vocab, batch, self._dqn, self._dqn_sess, self._dqn_graph)\n",
        "      else:\n",
        "        best_hyp = run_beam_search(self._sess, self._model, self._vocab, batch)\n",
        "      # Extract the output ids from the hypothesis and convert back to words\n",
        "      output_ids = [int(t) for t in best_hyp.tokens[1:]]\n",
        "      decoded_words = outputids2words(output_ids, self._vocab, (batch.art_oovs[0] if FLAGS.pointer_gen else None))\n",
        "      print(decoded_words)\n",
        "      # Remove the [STOP] token from decoded_words, if necessary\n",
        "      try:\n",
        "        fst_stop_idx = decoded_words.index(STOP_DECODING) # index of the (first) [STOP] symbol\n",
        "        decoded_words = decoded_words[:fst_stop_idx]\n",
        "      except ValueError:\n",
        "        decoded_words = decoded_words\n",
        "        \n",
        "      decoded_output = ' '.join(decoded_words) # single string\n",
        "        \n",
        "      if FLAGS.single_pass:\n",
        "        #self.write_for_rouge(original_abstract_sents, decoded_words, counter) # write ref summary and decoded summary to file, to eval with pyrouge later\n",
        "        \n",
        "        print(\"file written\")\n",
        "        article.append(article_withunks)\n",
        "        reference.append(abstract_withunks)\n",
        "        summary.append(decoded_output)\n",
        "        \n",
        "        counter += 1 # this is how many examples we've decoded\n",
        "        tf.logging.info(\"sentence summarized \" + str(counter))\n",
        "        if counter == 100 :\n",
        "            tf.logging.info(\"Counter 100 stopped.\")\n",
        "            return\n",
        "      else:\n",
        "        print_results(article_withunks, abstract_withunks, decoded_output) # log output to screen\n",
        "        self.write_for_attnvis(article_withunks, abstract_withunks, decoded_words, best_hyp.attn_dists, best_hyp.p_gens) # write info to .json file for visualization tool\n",
        "\n",
        "        # Check if SECS_UNTIL_NEW_CKPT has elapsed; if so return so we can load a new checkpoint\n",
        "        t1 = time.time()\n",
        "        if t1-t0 > SECS_UNTIL_NEW_CKPT:\n",
        "          tf.logging.info('We\\'ve been decoding with same checkpoint for %i seconds. Time to load new checkpoint', t1-t0)\n",
        "          _ = load_ckpt(self._saver, self._sess, FLAGS.decode_from)\n",
        "          t0 = time.time()\n",
        "\n",
        "  def remove_non_ascii(self, text):\n",
        "    #try:\n",
        "    #  return unicode(unidecode(unicode(text, encoding=\"utf-8\")))\n",
        "    #except:\n",
        "    return text #str(unidecode(text))\n",
        "\n",
        "  def write_for_rouge(self, reference_sents, decoded_words, ex_index):\n",
        "    \"\"\"Write output to file in correct format for eval with pyrouge. This is called in single_pass mode.\n",
        "    Args:\n",
        "      reference_sents: list of strings\n",
        "      decoded_words: list of strings\n",
        "      ex_index: int, the index with which to label the files\n",
        "    \"\"\"\n",
        "    # First, divide decoded output into sentences\n",
        "    decoded_sents = []\n",
        "    while len(decoded_words) > 0:\n",
        "      try:\n",
        "        fst_period_idx = decoded_words.index(\".\")\n",
        "      except ValueError: # there is text remaining that doesn't end in \".\"\n",
        "        fst_period_idx = len(decoded_words)\n",
        "      sent = decoded_words[:fst_period_idx+1] # sentence up to and including the period\n",
        "      decoded_words = decoded_words[fst_period_idx+1:] # everything else\n",
        "      decoded_sents.append(' '.join(sent))\n",
        "\n",
        "    # pyrouge calls a perl script that puts the data into HTML files.\n",
        "    # Therefore we need to make our output HTML safe.\n",
        "    decoded_sents = [self.remove_non_ascii(make_html_safe(w)) for w in decoded_sents]\n",
        "    reference_sents = [self.remove_non_ascii(make_html_safe(w)) for w in reference_sents]\n",
        "\n",
        "    # Write to file\n",
        "    ref_file = os.path.join(self._rouge_ref_dir, \"%06d_reference.txt\" % ex_index)\n",
        "    decoded_file = os.path.join(self._rouge_dec_dir, \"%06d_decoded.txt\" % ex_index)\n",
        "\n",
        "    with open(ref_file, \"w\") as f:\n",
        "      for idx,sent in enumerate(reference_sents):\n",
        "        f.write(sent) if idx==len(reference_sents)-1 else f.write(sent+\"\\n\")\n",
        "    with open(decoded_file, \"w\") as f:\n",
        "      for idx,sent in enumerate(decoded_sents):\n",
        "        f.write(sent) if idx==len(decoded_sents)-1 else f.write(sent+\"\\n\")\n",
        "\n",
        "    tf.logging.info(\"Wrote example %i to file\" % ex_index)\n",
        "\n",
        "  def write_for_attnvis(self, article, abstract, decoded_words, attn_dists, p_gens):\n",
        "    \"\"\"Write some data to json file, which can be read into the in-browser attention visualizer tool:\n",
        "      https://github.com/abisee/attn_vis\n",
        "    Args:\n",
        "      article: The original article string.\n",
        "      abstract: The human (correct) abstract string.\n",
        "      attn_dists: List of arrays; the attention distributions.\n",
        "      decoded_words: List of strings; the words of the generated summary.\n",
        "      p_gens: List of scalars; the p_gen values. If not running in pointer-generator mode, list of None.\n",
        "    \"\"\"\n",
        "    article_lst = article.split() # list of words\n",
        "    decoded_lst = decoded_words # list of decoded words\n",
        "    to_write = {\n",
        "        'article_lst': [make_html_safe(t) for t in article_lst],\n",
        "        'decoded_lst': [make_html_safe(t) for t in decoded_lst],\n",
        "        'abstract_str': make_html_safe(abstract),\n",
        "        'attn_dists': attn_dists\n",
        "    }\n",
        "    if FLAGS.pointer_gen:\n",
        "      to_write['p_gens'] = p_gens\n",
        "    output_fname = os.path.join(self._decode_dir, 'attn_vis_data.json')\n",
        "    with open(output_fname, 'w') as output_file:\n",
        "      json.dump(to_write, output_file)\n",
        "    tf.logging.info('Wrote visualization data to %s', output_fname)\n",
        "\n",
        "\n",
        "def print_results(article, abstract, decoded_output):\n",
        "  \"\"\"Prints the article, the reference summary and the decoded summary to screen\"\"\"\n",
        "  print(\"\")\n",
        "  tf.logging.info('ARTICLE:  %s', article)\n",
        "  tf.logging.info('REFERENCE SUMMARY: %s', abstract)\n",
        "  tf.logging.info('GENERATED SUMMARY: %s', decoded_output)\n",
        "  print(\"\")\n",
        "\n",
        "\n",
        "def make_html_safe(s):\n",
        "  \"\"\"Replace any angled brackets in string s to avoid interfering with HTML attention visualizer.\"\"\"\n",
        "  try:\n",
        "    s = s.replace(\"<\", \"&lt;\")\n",
        "    s = s.replace(\">\", \"&gt;\")\n",
        "  except:\n",
        "    pass\n",
        "  return s\n",
        "\n",
        "\n",
        "def rouge_eval(ref_dir, dec_dir):\n",
        "  \"\"\"Evaluate the files in ref_dir and dec_dir with pyrouge, returning results_dict\"\"\"\n",
        "  r = pyrouge.Rouge155()\n",
        "  r.model_filename_pattern = '#ID#_reference.txt'\n",
        "  r.system_filename_pattern = r'(\\d+)_decoded.txt'\n",
        "  r.model_dir = ref_dir\n",
        "  r.system_dir = dec_dir\n",
        "  logging.getLogger('global').setLevel(logging.WARNING) # silence pyrouge logging\n",
        "  rouge_results = r.convert_and_evaluate()\n",
        "  return r.output_to_dict(rouge_results)\n",
        "\n",
        "\n",
        "def rouge_log(results_dict, dir_to_write):\n",
        "  \"\"\"Log ROUGE results to screen and write to file.\n",
        "  Args:\n",
        "    results_dict: the dictionary returned by pyrouge\n",
        "    dir_to_write: the directory where we will write the results to\"\"\"\n",
        "  log_str = \"\"\n",
        "  for x in [\"1\",\"2\",\"l\"]:\n",
        "    log_str += \"\\nROUGE-%s:\\n\" % x\n",
        "    for y in [\"f_score\", \"recall\", \"precision\"]:\n",
        "      key = \"rouge_%s_%s\" % (x,y)\n",
        "      key_cb = key + \"_cb\"\n",
        "      key_ce = key + \"_ce\"\n",
        "      val = results_dict[key]\n",
        "      val_cb = results_dict[key_cb]\n",
        "      val_ce = results_dict[key_ce]\n",
        "      log_str += \"%s: %.4f with confidence interval (%.4f, %.4f)\\n\" % (key, val, val_cb, val_ce)\n",
        "  tf.logging.info(log_str) # log to screen\n",
        "  results_file = os.path.join(dir_to_write, \"ROUGE_results.txt\")\n",
        "  tf.logging.info(\"Writing final ROUGE results to %s...\", results_file)\n",
        "  with open(results_file, \"w\") as f:\n",
        "    f.write(log_str)\n",
        "\n",
        "def get_decode_dir_name(ckpt_name):\n",
        "  \"\"\"Make a descriptive name for the decode dir, including the name of the checkpoint we use to decode. This is called in single_pass mode.\"\"\"\n",
        "\n",
        "  if \"train\" in FLAGS.data_path: dataset = \"train\"\n",
        "  elif \"val\" in FLAGS.data_path: dataset = \"val\"\n",
        "  elif \"test\" in FLAGS.data_path: dataset = \"test\"\n",
        "  else: raise ValueError(\"FLAGS.data_path %s should contain one of train, val or test\" % (FLAGS.data_path))\n",
        "  dirname = \"decode_%s_%s_%imaxenc_%ibeam_%imindec_%imaxdec\" % (dataset, FLAGS.decode_from, FLAGS.max_enc_steps, FLAGS.beam_size, FLAGS.min_dec_steps, FLAGS.max_dec_steps)\n",
        "  if ckpt_name is not None:\n",
        "    dirname += \"_%s\" % ckpt_name\n",
        "  return dirname"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "0CT_2nWdJNnD",
        "colab_type": "text"
      },
      "source": [
        "### DQN"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VomJ-_qOJPLx",
        "colab_type": "text"
      },
      "source": [
        "https://github.com/yaserkl/RLSeq2Seq/blob/master/src/dqn.py"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "BmnGbytcJPb7",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import tensorflow as tf\n",
        "#import tensorlayer as tl\n",
        "import numpy as np\n",
        "\n",
        "class DQN(object):\n",
        "    def __init__(self, hps, name_variable):\n",
        "        self._hps = hps\n",
        "        self._name_variable = name_variable\n",
        "\n",
        "    def variable_summaries(self, var_name, var):\n",
        "        \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n",
        "        with tf.name_scope('summaries_{}'.format(var_name)):\n",
        "            mean = tf.reduce_mean(var)\n",
        "            tf.summary.scalar('mean', mean)\n",
        "            with tf.name_scope('stddev'):\n",
        "                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n",
        "            tf.summary.scalar('stddev', stddev)\n",
        "            tf.summary.scalar('max', tf.reduce_max(var))\n",
        "            tf.summary.scalar('min', tf.reduce_min(var))\n",
        "            tf.summary.histogram('histogram', var)\n",
        "\n",
        "    def _add_placeholders(self):\n",
        "        \"\"\"Add placeholders to the graph. These are entry points for any input data.\"\"\"\n",
        "        self._x = tf.placeholder(tf.float32, [None, self._hps.dqn_input_feature_len], name='x') # size (dataset_len, input_feature_len)\n",
        "        self._y = tf.placeholder(tf.float32, [None, self._hps.vocab_size], name='y') # size (dataset_len, vocab_size)\n",
        "        self._train_step = tf.placeholder(tf.int32, None,name='train_step')\n",
        "\n",
        "    def _make_feed_dict(self, batch):\n",
        "        feed_dict = {}\n",
        "        feed_dict[self._x] = batch._x\n",
        "        feed_dict[self._y] = batch._y\n",
        "        return feed_dict\n",
        "\n",
        "    def _add_tf_layers(self):\n",
        "        \"\"\" Based on the dqn_layers flag, it creates multiple dense layers to do the regression. \"\"\"\n",
        "\n",
        "        h = tf.layers.dense(self._x, units = self._hps.dqn_input_feature_len, activation=tf.nn.relu, name='{}_input_layer'.format(self._name_variable))\n",
        "        for i, layer in enumerate(self._hps.dqn_layers.split(',')):\n",
        "            h = tf.layers.dense(h, units = int(layer), activation = tf.nn.relu, name='{}_h_{}'.format(self._name_variable, i))\n",
        "\n",
        "        self.advantage_layer = tf.layers.dense(h, units = self._hps.vocab_size, activation = tf.nn.softmax, name='{}_advantage'.format(self._name_variable))\n",
        "        if self._hps.dueling_net:\n",
        "            # in dueling net, we have two extra output layers; one for value function estimation\n",
        "            # and the other for advantage estimation, we then use the difference between these two layers\n",
        "            # to calculate the q-estimation\n",
        "            self_layer = tf.layers.dense(h, units = 1, activation = tf.identity, name='{}_value'.format(self._name_variable))\n",
        "            normalized_al = self.advantage_layer-tf.reshape(tf.reduce_mean(self.advantage_layer,axis=1),[-1,1]) # equation 9 in https://arxiv.org/pdf/1511.06581.pdf\n",
        "            value_extended = tf.concat([self_layer] * self._hps.vocab_size, axis=1)\n",
        "            self.output = value_extended + normalized_al\n",
        "        else:\n",
        "            self.output = self.advantage_layer\n",
        "\n",
        "    def _add_train_op(self):\n",
        "        # In regression, the objective loss is Mean Squared Error (MSE).\n",
        "        self.loss = tf.losses.mean_squared_error(labels = self._y, predictions = self.output)\n",
        "\n",
        "        tvars = tf.trainable_variables()\n",
        "        gradients = tf.gradients(self.loss, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)\n",
        "\n",
        "        # Clip the gradients\n",
        "        with tf.device(\"/gpu:{}\".format(self._hps.dqn_gpu_num)):\n",
        "            grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)\n",
        "\n",
        "        # Add a summary\n",
        "        tf.summary.scalar('global_norm', global_norm)\n",
        "\n",
        "        # Apply adagrad optimizer\n",
        "        optimizer = tf.train.AdamOptimizer(self._hps.lr)\n",
        "        with tf.device(\"/gpu:{}\".format(self._hps.dqn_gpu_num)):\n",
        "            self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')\n",
        "\n",
        "        self.variable_summaries('dqn_loss',self.loss)\n",
        "\n",
        "    def _add_update_weights_op(self):\n",
        "        \"\"\" Updates the weight of the target network based on the current network. \"\"\"\n",
        "        self.model_trainables = tf.trainable_variables(scope='{}_relay_network'.format(self._name_variable)) # target variables\n",
        "        self._new_trainables = [tf.placeholder(tf.float32, None,name='trainables_{}'.format(i)) for i in range(len(self.model_trainables))]\n",
        "        self.assign_ops = []\n",
        "        if self._hps.dqn_polyak_averaging: # target parameters are slowly updated using: \\phi_target = \\tau * \\phi_target + (1-\\tau) * \\phi_current\n",
        "            tau = (tf.cast(self._train_step,tf.float32) % self._hps.dqn_target_update)/float(self._hps.dqn_target_update)\n",
        "            for i, mt in enumerate(self.model_trainables):\n",
        "                nt = self._new_trainables[i]\n",
        "                self.assign_ops.append(mt.assign(tau * mt + (1-tau) * nt))\n",
        "        else:\n",
        "          if self._train_step % self._hps.dqn_target_update == 0:\n",
        "            for i, mt in enumerate(self.model_trainables):\n",
        "                nt = self._new_trainables[i]\n",
        "                self.assign_ops.append(mt.assign(nt))\n",
        "\n",
        "    def build_graph(self):\n",
        "        with tf.variable_scope('{}_relay_network'.format(self._name_variable)), tf.device(\"/gpu:{}\".format(self._hps.dqn_gpu_num)):\n",
        "            self.global_step = tf.Variable(0, name='global_step', trainable=False)\n",
        "            self._add_placeholders()\n",
        "            self._add_tf_layers()\n",
        "            self._add_train_op()\n",
        "            self._add_update_weights_op()\n",
        "            self._summaries = tf.summary.merge_all()\n",
        "\n",
        "    def run_train_steps(self, sess, batch):\n",
        "        feed_dict = self._make_feed_dict(batch)\n",
        "        to_return = {'train_op': self.train_op,\n",
        "        'summaries': self._summaries,\n",
        "        'loss': self.loss,\n",
        "        'global_step': self.global_step}\n",
        "        return sess.run(to_return, feed_dict)\n",
        "\n",
        "    def run_test_steps(self, sess, x, y=None, return_loss=False, return_best_action=False):\n",
        "        # when return_loss is True, the model will return the loss of the prediction\n",
        "        # return_loss should be False, during estimation (decoding)\n",
        "        feed_dict = {self._x:x}\n",
        "        to_return = {'estimates': self.output}\n",
        "        if return_loss:\n",
        "            feed_dict.update({self._y:y})\n",
        "            to_return.update({'loss': self.loss})\n",
        "        output = sess.run(to_return, feed_dict)\n",
        "        if return_best_action:\n",
        "            output['best_action']=np.argmax(output['estimates'],axis=1)\n",
        "\n",
        "        return output\n",
        "\n",
        "    def run_update_weights(self, sess, train_step, weights):\n",
        "        feed_dict = {self._train_step:train_step}\n",
        "        for i, w in enumerate(weights):\n",
        "            feed_dict.update({self._new_trainables[i]:w})\n",
        "        _ = sess.run(self.assign_ops, feed_dict)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-BUkkzzj20LP",
        "colab_type": "text"
      },
      "source": [
        "### zaksum"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ydqQhOWz213h",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "#https://pymotw.com/2/xml/etree/ElementTree/create.html\n",
        "\n",
        "from xml.etree import ElementTree\n",
        "from xml.dom import minidom\n",
        "from functools import reduce\n",
        "\n",
        "def prettify(elem):\n",
        "    \"\"\"Return a pretty-printed XML string for the Element.\n",
        "    \"\"\"\n",
        "    rough_string = ElementTree.tostring(elem, 'utf-8')\n",
        "    reparsed = minidom.parseString(rough_string)\n",
        "    return reparsed.toprettyxml(indent=\"  \")\n",
        "  \n",
        "from xml.etree.ElementTree import Element, SubElement, Comment\n",
        "\n",
        "\n",
        "def zaksum(article , reference , summary_array , directory_path):\n",
        "  top = Element('ZakSum')\n",
        "\n",
        "  comment = Comment('Generated by Amr Zaki without scores')\n",
        "  top.append(comment)\n",
        "\n",
        "  i=0\n",
        "  for summ in summary_array:\n",
        "    example = SubElement(top, 'example')\n",
        "    article_element   = SubElement(example, 'article')\n",
        "    article_element.text = article[i]\n",
        "\n",
        "    reference_element = SubElement(example, 'reference')\n",
        "    reference_element.text = reference[i]\n",
        "\n",
        "    summary_element   = SubElement(example, 'summary')\n",
        "    summary_element.text = summ\n",
        "    i+=1\n",
        "    \n",
        "  with open(directory_path, mode=\"w\") as f:\n",
        "    f.write(prettify(top))\n",
        "    #rough_string = ElementTree.tostring(top, 'utf-8')\n",
        "    #f.write(rough_string)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "e7ViuuP3IIeL",
        "colab_type": "text"
      },
      "source": [
        "### Run Main "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dvV0aTajHL1U",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n",
        "# Modifications Copyright 2017 Abigail See\n",
        "#\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "# ==============================================================================\n",
        "\n",
        "\"\"\"This is the top-level file to train, evaluate or test your summarization model\"\"\"\n",
        "\n",
        "import time\n",
        "import os\n",
        "import tensorflow as tf\n",
        "from collections import namedtuple\n",
        "#from data import Vocab\n",
        "#from batcher import Batcher\n",
        "#from model import SummarizationModel\n",
        "#from decode import BeamSearchDecoder\n",
        "#import util as util\n",
        "import numpy as np\n",
        "from glob import glob\n",
        "from tensorflow.python import debug as tf_debug\n",
        "#from replay_buffer import ReplayBuffer\n",
        "#from dqn import DQN\n",
        "from threading import Thread\n",
        "from tensorflow.python.ops import variable_scope\n",
        "from tensorflow.python.ops import array_ops\n",
        "from tensorflow.python.ops import gen_array_ops\n",
        "from tensorflow.python.ops import math_ops\n",
        "from tensorflow.python.ops.distributions import bernoulli\n",
        "\n",
        "\n",
        "#FLAGS = tf.app.flags.FLAGS\n",
        "#FLAGS.remove_flag_values(FLAGS.flag_values_dict()) #https://stackoverflow.com/questions/49916921/how-to-clear-tf-flags\n",
        "\n",
        "\n",
        "#https://stackoverflow.com/questions/40559667/how-to-redirect-tensorflow-logging-to-a-file\n",
        "import logging\n",
        "\n",
        "# get TF logger\n",
        "log = logging.getLogger('tensorflow')\n",
        "log.setLevel(logging.DEBUG)\n",
        "\n",
        "# create formatter and add it to the handlers\n",
        "formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n",
        "\n",
        "# create file handler which logs even debug messages\n",
        "fh = logging.FileHandler(tensorflow_log_file)\n",
        "fh.setLevel(logging.DEBUG)\n",
        "fh.setFormatter(formatter)\n",
        "log.addHandler(fh)\n",
        "\n",
        "\n",
        "class Seq2Seq(object):\n",
        "\n",
        "  def calc_running_avg_loss(self, loss, running_avg_loss, step, decay=0.99):\n",
        "    \"\"\"Calculate the running average loss via exponential decay.\n",
        "    This is used to implement early stopping w.r.t. a more smooth loss curve than the raw loss curve.\n",
        "    Args:\n",
        "      loss: loss on the most recent eval step\n",
        "      running_avg_loss: running_avg_loss so far\n",
        "      summary_writer: FileWriter object to write for tensorboard\n",
        "      step: training iteration step\n",
        "      decay: rate of exponential decay, a float between 0 and 1. Larger is smoother.\n",
        "    Returns:\n",
        "      running_avg_loss: new running average loss\n",
        "    \"\"\"\n",
        "    if running_avg_loss == 0:  # on the first iteration just take the loss\n",
        "      running_avg_loss = loss\n",
        "    else:\n",
        "      running_avg_loss = running_avg_loss * decay + (1 - decay) * loss\n",
        "    running_avg_loss = min(running_avg_loss, 12)  # clip\n",
        "    loss_sum = tf.Summary()\n",
        "    \n",
        "    tag_name = 'running_avg_loss/decay=%f' % (decay)\n",
        "    loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)\n",
        "    self.summary_writer.add_summary(loss_sum, step)\n",
        "    tf.logging.info('running_avg_loss: %f', running_avg_loss)\n",
        "    return running_avg_loss\n",
        "\n",
        "  def restore_best_model(self):\n",
        "    \"\"\"Load bestmodel file from eval directory, add variables for adagrad, and save to train directory\"\"\"\n",
        "    tf.logging.info(\"Restoring bestmodel for training...\")\n",
        "\n",
        "    # Initialize all vars in the model\n",
        "    #sess = tf.InteractiveSession(config=get_config())#me\n",
        "    sess = tf.Session(config=get_config())\n",
        "    print(\"Initializing all variables...\")\n",
        "    sess.run(tf.initialize_all_variables())\n",
        "    #tf.reset_default_graph() #me\n",
        "    # Restore the best model from eval dir\n",
        "    saver = tf.train.Saver([v for v in tf.all_variables() if \"Adagrad\" not in v.name])\n",
        "    print(\"Restoring all non-adagrad variables from best model in eval dir...\")\n",
        "    curr_ckpt = load_ckpt(saver, sess, \"eval\")\n",
        "    print(\"Restored %s.\" % curr_ckpt)\n",
        "\n",
        "    # Save this model to train dir and quit\n",
        "    new_model_name = curr_ckpt.split(\"/\")[-1].replace(\"bestmodel\", \"model\")\n",
        "    new_fname = os.path.join(FLAGS.log_root, \"train\", new_model_name)\n",
        "    print(\"Saving model to %s...\" % (new_fname))\n",
        "    \n",
        "    with open(log_file , \"a\") as logger_file:\n",
        "      logger_file.write(\"Saving model to %s...\" % (new_fname)+\"\\n\")\n",
        "\n",
        "    new_saver = tf.train.Saver() # this saver saves all variables that now exist, including Adagrad variables\n",
        "    new_saver.save(sess, new_fname)\n",
        "    print(\"Saved.\")\n",
        "    exit()\n",
        "\n",
        "  def restore_best_eval_model(self):\n",
        "    # load best evaluation loss so far\n",
        "    best_loss = None\n",
        "    best_step = None\n",
        "    # goes through all event files and select the best loss achieved and return it\n",
        "    event_files = sorted(glob('{}/eval/events*'.format(FLAGS.log_root)))\n",
        "    for ef in event_files:\n",
        "      try:\n",
        "        for e in tf.train.summary_iterator(ef):\n",
        "          for v in e.summary.value:\n",
        "            step = e.step\n",
        "            if 'running_avg_loss/decay' in v.tag:\n",
        "              running_avg_loss = v.simple_value\n",
        "              if best_loss is None or running_avg_loss < best_loss:\n",
        "                best_loss = running_avg_loss\n",
        "                best_step = step\n",
        "      except:\n",
        "        continue\n",
        "    tf.logging.info('resotring best loss from the current logs: {}\\tstep: {}'.format(best_loss, best_step))\n",
        "    return best_loss\n",
        "\n",
        "  def convert_to_coverage_model(self):\n",
        "    \"\"\"Load non-coverage checkpoint, add initialized extra variables for coverage, and save as new checkpoint\"\"\"\n",
        "    tf.logging.info(\"converting non-coverage model to coverage model..\")\n",
        "\n",
        "    # initialize an entire coverage model from scratch\n",
        "    sess = tf.Session(config=get_config())\n",
        "    print(\"initializing everything...\")\n",
        "    sess.run(tf.global_variables_initializer())\n",
        "\n",
        "    # load all non-coverage weights from checkpoint\n",
        "    saver = tf.train.Saver([v for v in tf.global_variables() if \"coverage\" not in v.name and \"Adagrad\" not in v.name])\n",
        "    print(\"restoring non-coverage variables...\")\n",
        "    curr_ckpt = load_ckpt(saver, sess)\n",
        "    print(\"restored.\")\n",
        "\n",
        "    # save this model and quit\n",
        "    new_fname = curr_ckpt + '_cov_init'\n",
        "    print(\"saving model to %s...\" % (new_fname))\n",
        "    new_saver = tf.train.Saver() # this one will save all variables that now exist\n",
        "    new_saver.save(sess, new_fname)\n",
        "    print(\"saved.\")\n",
        "    exit()\n",
        "\n",
        "  def convert_to_reinforce_model(self,word_vector):\n",
        "    \"\"\"Load non-reinforce checkpoint, add initialized extra variables for reinforce, and save as new checkpoint\"\"\"\n",
        "    tf.logging.info(\"converting non-reinforce model to reinforce model..\")\n",
        "\n",
        "    # initialize an entire reinforce model from scratch\n",
        "    sess = tf.Session(config=get_config())\n",
        "    print(\"initializing everything...\")\n",
        "    sess.run(tf.global_variables_initializer(),feed_dict={self.model.embedding_place:word_vector})\n",
        "\n",
        "    # load all non-reinforce weights from checkpoint\n",
        "    saver = tf.train.Saver([v for v in tf.global_variables() if \"reinforce\" not in v.name and \"Adagrad\" not in v.name])\n",
        "    print(\"restoring non-reinforce variables...\")\n",
        "    curr_ckpt = load_ckpt(saver, sess)\n",
        "    print(\"restored.\")\n",
        "\n",
        "    # save this model and quit\n",
        "    new_fname = curr_ckpt + '_rl_init'\n",
        "    print(\"saving model to %s...\" % (new_fname))\n",
        "    new_saver = tf.train.Saver() # this one will save all variables that now exist\n",
        "    new_saver.save(sess, new_fname)\n",
        "    print(\"saved.\")\n",
        "    exit()\n",
        "\n",
        "  def setup_training(self):\n",
        "    \"\"\"Does setup before starting training (run_training)\"\"\"    \n",
        "    train_dir = os.path.join(FLAGS.log_root, \"train\")\n",
        "    if not os.path.exists(train_dir): os.makedirs(train_dir)\n",
        "    if FLAGS.ac_training:\n",
        "      dqn_train_dir = os.path.join(FLAGS.log_root, \"dqn\", \"train\")\n",
        "      if not os.path.exists(dqn_train_dir): os.makedirs(dqn_train_dir)\n",
        "    #replaybuffer_pcl_path = os.path.join(FLAGS.log_root, \"replaybuffer.pcl\")\n",
        "    #if not os.path.exists(dqn_target_train_dir): os.makedirs(dqn_target_train_dir)\n",
        "\n",
        "    self.model.build_graph() # build the graph\n",
        "\n",
        "    # Loads pre-trained word-embedding. By default the model learns the embedding.\n",
        "    if FLAGS.embedding:\n",
        "      self.vocab.LoadWordEmbedding(FLAGS.embedding, FLAGS.emb_dim)\n",
        "      word_vector = self.vocab.getWordEmbedding()\n",
        "      \n",
        "    if FLAGS.convert_to_reinforce_model:\n",
        "      assert (FLAGS.rl_training or FLAGS.ac_training), \"To convert your pointer model to a reinforce model, run with convert_to_reinforce_model=True and either rl_training=True or ac_training=True\"\n",
        "      self.convert_to_reinforce_model(word_vector)\n",
        "    if FLAGS.convert_to_coverage_model:\n",
        "      assert FLAGS.coverage, \"To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True\"\n",
        "      self.convert_to_coverage_model()\n",
        "    if FLAGS.restore_best_model:\n",
        "      self.restore_best_model()\n",
        "    saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time\n",
        "\n",
        "    self.sv = tf.train.Supervisor(logdir=train_dir,\n",
        "                       is_chief=True,\n",
        "                       saver=saver,\n",
        "                       summary_op=None,\n",
        "                       save_summaries_secs=60, # save summaries for tensorboard every 60 secs\n",
        "                       save_model_secs=60, # checkpoint every 60 secs\n",
        "                       global_step=self.model.global_step,\n",
        "                       init_feed_dict= {self.model.embedding_place:word_vector} if FLAGS.embedding else None\n",
        "                       )\n",
        "    self.summary_writer = self.sv.summary_writer\n",
        "    self.sess = self.sv.prepare_or_wait_for_session(config=get_config())\n",
        "    if FLAGS.ac_training:\n",
        "      tf.logging.info('DDQN building graph')\n",
        "      t1 = time.time()\n",
        "      # We create a separate graph for DDQN\n",
        "      self.dqn_graph = tf.Graph()\n",
        "      with self.dqn_graph.as_default():\n",
        "        self.dqn.build_graph() # build dqn graph\n",
        "        tf.logging.info('building current network took {} seconds'.format(time.time()-t1))\n",
        "\n",
        "        self.dqn_target.build_graph() # build dqn target graph\n",
        "        tf.logging.info('building target network took {} seconds'.format(time.time()-t1))\n",
        "\n",
        "        dqn_saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time\n",
        "        self.dqn_sv = tf.train.Supervisor(logdir=dqn_train_dir,\n",
        "                           is_chief=True,\n",
        "                           saver=dqn_saver,\n",
        "                           summary_op=None,\n",
        "                           save_summaries_secs=60, # save summaries for tensorboard every 60 secs\n",
        "                           save_model_secs=60, # checkpoint every 60 secs\n",
        "                           global_step=self.dqn.global_step,\n",
        "                           )\n",
        "        self.dqn_summary_writer = self.dqn_sv.summary_writer\n",
        "        self.dqn_sess = self.dqn_sv.prepare_or_wait_for_session(config=get_config())\n",
        "      ''' #### TODO: try loading a previously saved replay buffer\n",
        "      # right now this doesn't work due to running DQN on a thread\n",
        "      if os.path.exists(replaybuffer_pcl_path):\n",
        "        tf.logging.info('Loading Replay Buffer...')\n",
        "        try:\n",
        "          self.replay_buffer = pickle.load(open(replaybuffer_pcl_path, \"rb\"))\n",
        "          tf.logging.info('Replay Buffer loaded...')\n",
        "        except:\n",
        "          tf.logging.info('Couldn\\'t load Replay Buffer file...')\n",
        "          self.replay_buffer = ReplayBuffer(self.dqn_hps)\n",
        "      else:\n",
        "        self.replay_buffer = ReplayBuffer(self.dqn_hps)\n",
        "      tf.logging.info(\"Building DDQN took {} seconds\".format(time.time()-t1))\n",
        "      '''\n",
        "      self.replay_buffer = ReplayBuffer(self.dqn_hps)\n",
        "    tf.logging.info(\"Preparing or waiting for session...\")\n",
        "    tf.logging.info(\"Created session.\")\n",
        "    try:\n",
        "      self.run_training() # this is an infinite loop until interrupted\n",
        "    except (KeyboardInterrupt, SystemExit):\n",
        "      tf.logging.info(\"Caught keyboard interrupt on worker. Stopping supervisor...\")\n",
        "      self.sv.stop()\n",
        "      if FLAGS.ac_training:\n",
        "        self.dqn_sv.stop()\n",
        "\n",
        "  def run_training(self):\n",
        "    \"\"\"Repeatedly runs training iterations, logging loss to screen and writing summaries\"\"\"\n",
        "    tf.logging.info(\"Starting run_training\")\n",
        "    \n",
        "    with open(log_file , \"a\") as logger_file:\n",
        "      logger_file.write(\"starting run_training..\\n\")\n",
        "    \n",
        "    if FLAGS.debug: # start the tensorflow debugger\n",
        "      self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)\n",
        "      self.sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\n",
        "\n",
        "    self.train_step = 0\n",
        "    if FLAGS.ac_training:\n",
        "      # DDQN training is done asynchronously along with model training\n",
        "      tf.logging.info('Starting DQN training thread...')\n",
        "      self.dqn_train_step = 0\n",
        "      self.thrd_dqn_training = Thread(target=self.dqn_training)\n",
        "      self.thrd_dqn_training.daemon = True\n",
        "      self.thrd_dqn_training.start()\n",
        "\n",
        "      watcher = Thread(target=self.watch_threads)\n",
        "      watcher.daemon = True\n",
        "      watcher.start()\n",
        "    # starting the main thread\n",
        "    tf.logging.info('Starting Seq2Seq training...')\n",
        "    \n",
        "    with open(log_file , \"a\") as logger_file:\n",
        "      logger_file.write(\"Starting Seq2Seq training...\\n\")\n",
        "      \n",
        "    while True: # repeats until interrupted\n",
        "      batch = self.batcher.next_batch()\n",
        "      t0=time.time()\n",
        "      if FLAGS.ac_training:\n",
        "        # For DDQN, we first collect the model output to calculate the reward and Q-estimates\n",
        "        # Then we fix the estimation either using our target network or using the true Q-values\n",
        "        # This process will usually take time and we are working on improving it.\n",
        "        transitions = self.model.collect_dqn_transitions(self.sess, batch, self.train_step, batch.max_art_oovs) # len(batch_size * k * max_dec_steps)\n",
        "        tf.logging.info('Q-values collection time: {}'.format(time.time()-t0))\n",
        "        # whenever we are working with the DDQN, we switch using DDQN graph rather than default graph\n",
        "        with self.dqn_graph.as_default():\n",
        "          batch_len = len(transitions)\n",
        "          # we use current decoder state to predict q_estimates, use_state_prime = False\n",
        "          b = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = False, max_art_oovs = batch.max_art_oovs)\n",
        "          # we also get the next decoder state to correct the estimation, use_state_prime = True\n",
        "          b_prime = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = True, max_art_oovs = batch.max_art_oovs)\n",
        "          # use current DQN to estimate values from current decoder state\n",
        "          dqn_results = self.dqn.run_test_steps(sess=self.dqn_sess, x= b._x, return_best_action=True)\n",
        "          q_estimates = dqn_results['estimates'] # shape (len(transitions), vocab_size)\n",
        "          dqn_best_action = dqn_results['best_action']\n",
        "          #dqn_q_estimate_loss = dqn_results['loss']\n",
        "\n",
        "          # use target DQN to estimate values for the next decoder state\n",
        "          dqn_target_results = self.dqn_target.run_test_steps(self.dqn_sess, x= b_prime._x)\n",
        "          q_vals_new_t = dqn_target_results['estimates'] # shape (len(transitions), vocab_size)\n",
        "\n",
        "          # we need to expand the q_estimates to match the input batch max_art_oov\n",
        "          # we use the q_estimate of UNK token for all the OOV tokens\n",
        "          q_estimates = np.concatenate([q_estimates,\n",
        "            np.reshape(q_estimates[:,0],[-1,1])*np.ones((len(transitions),batch.max_art_oovs))],axis=-1)\n",
        "          # modify Q-estimates using the result collected from current and target DQN.\n",
        "          # check algorithm 5 in the paper for more info: https://arxiv.org/pdf/1805.09461.pdf\n",
        "          for i, tr in enumerate(transitions):\n",
        "            if tr.done:\n",
        "              q_estimates[i][tr.action] = tr.reward\n",
        "            else:\n",
        "              q_estimates[i][tr.action] = tr.reward + FLAGS.gamma * q_vals_new_t[i][dqn_best_action[i]]\n",
        "          # use scheduled sampling to whether use true Q-values or DDQN estimation\n",
        "          if FLAGS.dqn_scheduled_sampling:\n",
        "            q_estimates = self.scheduled_sampling(batch_len, FLAGS.sampling_probability, b._y_extended, q_estimates)\n",
        "          if not FLAGS.calculate_true_q:\n",
        "            # when we are not training DDQN based on true Q-values,\n",
        "            # we need to update Q-values in our transitions based on the q_estimates we collected from DQN current network.\n",
        "            for trans, q_val in zip(transitions,q_estimates):\n",
        "              trans.q_values = q_val # each have the size vocab_extended\n",
        "          q_estimates = np.reshape(q_estimates, [FLAGS.batch_size, FLAGS.k, FLAGS.max_dec_steps, -1]) # shape (batch_size, k, max_dec_steps, vocab_size_extended)\n",
        "        # Once we are done with modifying Q-values, we can use them to train the DDQN model.\n",
        "        # In this paper, we use a priority experience buffer which always selects states with higher quality\n",
        "        # to train the DDQN. The following line will add batch_size * max_dec_steps experiences to the replay buffer.\n",
        "        # As mentioned before, the DDQN training is asynchronous. Therefore, once the related queues for DDQN training\n",
        "        # are full, the DDQN will start the training.\n",
        "        self.replay_buffer.add(transitions)\n",
        "        # If dqn_pretrain flag is on, it means that we use a fixed Actor to only collect experiences for\n",
        "        # DDQN pre-training\n",
        "        if FLAGS.dqn_pretrain:\n",
        "          tf.logging.info('RUNNNING DQN PRETRAIN: Adding data to relplay buffer only...')\n",
        "          continue\n",
        "        # if not, use the q_estimation to update the loss.\n",
        "        try:\n",
        "          results = self.model.run_train_steps(self.sess, batch, self.train_step, q_estimates)\n",
        "        except:\n",
        "          continue\n",
        "      else:\n",
        "        try:\n",
        "          results = self.model.run_train_steps(self.sess, batch, self.train_step)\n",
        "        except:\n",
        "          continue\n",
        "      t1=time.time()\n",
        "      # get the summaries and iteration number so we can write summaries to tensorboard\n",
        "      summaries = results['summaries'] # we will write these summaries to tensorboard using summary_writer\n",
        "      self.train_step = results['global_step'] # we need this to update our running average loss\n",
        "      tf.logging.info('seconds for training step {}: {}'.format(self.train_step, t1-t0))\n",
        "\n",
        "      printer_helper = {}\n",
        "      printer_helper['pgen_loss']= results['pgen_loss']\n",
        "      if FLAGS.coverage:\n",
        "        printer_helper['coverage_loss'] = results['coverage_loss']\n",
        "        if FLAGS.rl_training or FLAGS.ac_training:\n",
        "          printer_helper['rl_cov_total_loss']= results['reinforce_cov_total_loss']\n",
        "        else:\n",
        "          printer_helper['pointer_cov_total_loss'] = results['pointer_cov_total_loss']\n",
        "      if FLAGS.rl_training or FLAGS.ac_training:\n",
        "        printer_helper['shared_loss'] = results['shared_loss']\n",
        "        printer_helper['rl_loss'] = results['rl_loss']\n",
        "        printer_helper['rl_avg_logprobs'] = results['rl_avg_logprobs']\n",
        "      if FLAGS.rl_training:\n",
        "        printer_helper['sampled_r'] = np.mean(results['sampled_sentence_r_values'])\n",
        "        printer_helper['greedy_r'] = np.mean(results['greedy_sentence_r_values'])\n",
        "        printer_helper['r_diff'] = printer_helper['greedy_r'] - printer_helper['sampled_r']\n",
        "      if FLAGS.ac_training:\n",
        "        printer_helper['dqn_loss'] = np.mean(self.avg_dqn_loss) if len(self.avg_dqn_loss)>0 else 0\n",
        "\n",
        "      for (k,v) in printer_helper.items():\n",
        "        if not np.isfinite(v):\n",
        "          raise Exception(\"{} is not finite. Stopping.\".format(k))\n",
        "        tf.logging.info('{}: {}\\t'.format(k,v))\n",
        "        with open(log_file , \"a\") as logger_file:\n",
        "          logger_file.write(str(k )+\" : \" +str( v) +\"\\n\")\n",
        "          \n",
        "      with open(log_file , \"a\") as logger_file:\n",
        "          logger_file.write('-------------------------------------------')\n",
        "      tf.logging.info('-------------------------------------------')\n",
        "      \n",
        "      tf.logging.info('add_summary')\n",
        "      self.summary_writer.add_summary(summaries, self.train_step) # write the summaries\n",
        "      if self.train_step % 100 == 0: # flush the summary writer every so often\n",
        "        self.summary_writer.flush()\n",
        "      if FLAGS.ac_training:\n",
        "        self.dqn_summary_writer.flush()\n",
        "      ###if self.train_step > FLAGS.max_iter: break\n",
        "\n",
        "  def dqn_training(self):\n",
        "    \"\"\" training the DDQN network.\"\"\"\n",
        "    try:\n",
        "      while True:\n",
        "        if self.dqn_train_step == FLAGS.dqn_pretrain_steps: raise SystemExit()\n",
        "        _t = time.time()\n",
        "        self.avg_dqn_loss = []\n",
        "        avg_dqn_target_loss = []\n",
        "        # Get a batch of size dqn_batch_size from replay buffer to train the model\n",
        "        dqn_batch = self.replay_buffer.next_batch()\n",
        "        if dqn_batch is None:\n",
        "          tf.logging.info('replay buffer not loaded enough yet...')\n",
        "          time.sleep(60)\n",
        "          continue\n",
        "        # Run train step for Current DQN model and collect the results\n",
        "        dqn_results = self.dqn.run_train_steps(self.dqn_sess, dqn_batch)\n",
        "        # Run test step for Target DQN model and collect the results and monitor the difference in loss between the two\n",
        "        dqn_target_results = self.dqn_target.run_test_steps(self.dqn_sess, x=dqn_batch._x, y=dqn_batch._y, return_loss=True)\n",
        "        self.dqn_train_step = dqn_results['global_step']\n",
        "        self.dqn_summary_writer.add_summary(dqn_results['summaries'], self.dqn_train_step) # write the summaries\n",
        "        self.avg_dqn_loss.append(dqn_results['loss'])\n",
        "        avg_dqn_target_loss.append(dqn_target_results['loss'])\n",
        "        self.dqn_train_step = self.dqn_train_step + 1\n",
        "        tf.logging.info('seconds for training dqn model: {}'.format(time.time()-_t))\n",
        "        # UPDATING TARGET DDQN NETWORK WITH CURRENT MODEL\n",
        "        with self.dqn_graph.as_default():\n",
        "          current_model_weights = self.dqn_sess.run([self.dqn.model_trainables])[0] # get weights of current model\n",
        "          self.dqn_target.run_update_weights(self.dqn_sess, self.dqn_train_step, current_model_weights) # update target model weights with current model weights\n",
        "        tf.logging.info('DQN loss at step {}: {}'.format(self.dqn_train_step, np.mean(self.avg_dqn_loss)))\n",
        "        tf.logging.info('DQN Target loss at step {}: {}'.format(self.dqn_train_step, np.mean(avg_dqn_target_loss)))\n",
        "        # sleeping is required if you want the keyboard interuption to work\n",
        "        time.sleep(FLAGS.dqn_sleep_time)\n",
        "    except (KeyboardInterrupt, SystemExit):\n",
        "      tf.logging.info(\"Caught keyboard interrupt on worker. Stopping supervisor...\")\n",
        "      self.sv.stop()\n",
        "      self.dqn_sv.stop()\n",
        "\n",
        "  def watch_threads(self):\n",
        "    \"\"\"Watch example queue and batch queue threads and restart if dead.\"\"\"\n",
        "    while True:\n",
        "      time.sleep(60)\n",
        "      if not self.thrd_dqn_training.is_alive(): # if the thread is dead\n",
        "        tf.logging.error('Found DQN Learning thread dead. Restarting.')\n",
        "        self.thrd_dqn_training = Thread(target=self.dqn_training)\n",
        "        self.thrd_dqn_training.daemon = True\n",
        "        self.thrd_dqn_training.start()\n",
        "\n",
        "  def run_eval(self):\n",
        "    \"\"\"Repeatedly runs eval iterations, logging to screen and writing summaries. Saves the model with the best loss seen so far.\"\"\"\n",
        "    self.model.build_graph() # build the graph\n",
        "    saver = tf.train.Saver(max_to_keep=3) # we will keep 3 best checkpoints at a time\n",
        "    sess = tf.Session(config=get_config())\n",
        "\n",
        "    if FLAGS.embedding:\n",
        "      sess.run(tf.global_variables_initializer(),feed_dict={self.model.embedding_place:self.word_vector})\n",
        "    eval_dir = os.path.join(FLAGS.log_root, \"eval\") # make a subdir of the root dir for eval data\n",
        "    bestmodel_save_path = os.path.join(eval_dir, 'bestmodel') # this is where checkpoints of best models are saved\n",
        "    self.summary_writer = tf.summary.FileWriter(eval_dir)\n",
        "\n",
        "    if FLAGS.ac_training:\n",
        "      tf.logging.info('DDQN building graph')\n",
        "      t1 = time.time()\n",
        "      dqn_graph = tf.Graph()\n",
        "      with dqn_graph.as_default():\n",
        "        self.dqn.build_graph() # build dqn graph\n",
        "        tf.logging.info('building current network took {} seconds'.format(time.time()-t1))\n",
        "        self.dqn_target.build_graph() # build dqn target graph\n",
        "        tf.logging.info('building target network took {} seconds'.format(time.time()-t1))\n",
        "        dqn_saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time\n",
        "        dqn_sess = tf.Session(config=get_config())\n",
        "      dqn_train_step = 0\n",
        "      replay_buffer = ReplayBuffer(self.dqn_hps)\n",
        "\n",
        "    running_avg_loss = 0 # the eval job keeps a smoother, running average loss to tell it when to implement early stopping\n",
        "    best_loss = self.restore_best_eval_model()  # will hold the best loss achieved so far\n",
        "    train_step = 0\n",
        "\n",
        "    while True:\n",
        "      _ = load_ckpt(saver, sess) # load a new checkpoint\n",
        "      if FLAGS.ac_training:\n",
        "        _ = load_dqn_ckpt(dqn_saver, dqn_sess) # load a new checkpoint\n",
        "      processed_batch = 0\n",
        "      avg_losses = []\n",
        "      # evaluate for 100 * batch_size before comparing the loss\n",
        "      # we do this due to memory constraint, best to run eval on different machines with large batch size\n",
        "      while processed_batch < 100*FLAGS.batch_size:\n",
        "        processed_batch += FLAGS.batch_size\n",
        "        batch = self.batcher.next_batch() # get the next batch\n",
        "        if FLAGS.ac_training:\n",
        "          t0 = time.time()\n",
        "          transitions = self.model.collect_dqn_transitions(sess, batch, train_step, batch.max_art_oovs) # len(batch_size * k * max_dec_steps)\n",
        "          tf.logging.info('Q values collection time: {}'.format(time.time()-t0))\n",
        "          with dqn_graph.as_default():\n",
        "            # if using true Q-value to train DQN network,\n",
        "            # we do this as the pre-training for the DQN network to get better estimates\n",
        "            batch_len = len(transitions)\n",
        "            b = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = True, max_art_oovs = batch.max_art_oovs)\n",
        "            b_prime = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = True, max_art_oovs = batch.max_art_oovs)\n",
        "            dqn_results = self.dqn.run_test_steps(sess=dqn_sess, x= b._x, return_best_action=True)\n",
        "            q_estimates = dqn_results['estimates'] # shape (len(transitions), vocab_size)\n",
        "            dqn_best_action = dqn_results['best_action']\n",
        "\n",
        "            tf.logging.info('running test step on dqn_target')\n",
        "            dqn_target_results = self.dqn_target.run_test_steps(dqn_sess, x= b_prime._x)\n",
        "            q_vals_new_t = dqn_target_results['estimates'] # shape (len(transitions), vocab_size)\n",
        "\n",
        "            # we need to expand the q_estimates to match the input batch max_art_oov\n",
        "            q_estimates = np.concatenate([q_estimates,np.zeros((len(transitions),batch.max_art_oovs))],axis=-1)\n",
        "\n",
        "            tf.logging.info('fixing the action q-estimates')\n",
        "            for i, tr in enumerate(transitions):\n",
        "              if tr.done:\n",
        "                q_estimates[i][tr.action] = tr.reward\n",
        "              else:\n",
        "                q_estimates[i][tr.action] = tr.reward + FLAGS.gamma * q_vals_new_t[i][dqn_best_action[i]]\n",
        "            if FLAGS.dqn_scheduled_sampling:\n",
        "              tf.logging.info('scheduled sampling on q-estimates')\n",
        "              q_estimates = self.scheduled_sampling(batch_len, FLAGS.sampling_probability, b._y_extended, q_estimates)\n",
        "            if not FLAGS.calculate_true_q:\n",
        "              # when we are not training DQN based on true Q-values\n",
        "              # we need to update Q-values in our transitions based on this q_estimates we collected from DQN current network.\n",
        "              for trans, q_val in zip(transitions,q_estimates):\n",
        "                trans.q_values = q_val # each have the size vocab_extended\n",
        "            q_estimates = np.reshape(q_estimates, [FLAGS.batch_size, FLAGS.k, FLAGS.max_dec_steps, -1]) # shape (batch_size, k, max_dec_steps, vocab_size_extended)\n",
        "          tf.logging.info('run eval step on seq2seq model.')\n",
        "          t0=time.time()\n",
        "          results = self.model.run_eval_step(sess, batch, train_step, q_estimates)\n",
        "          t1=time.time()\n",
        "        else:\n",
        "          tf.logging.info('run eval step on seq2seq model.')\n",
        "          t0=time.time()\n",
        "          results = self.model.run_eval_step(sess, batch, train_step)\n",
        "          t1=time.time()\n",
        "\n",
        "        tf.logging.info('experiment: {}'.format(FLAGS.exp_name))\n",
        "        tf.logging.info('processed_batch: {}, seconds for batch: {}'.format(processed_batch, t1-t0))\n",
        "\n",
        "        printer_helper = {}\n",
        "        loss = printer_helper['pgen_loss']= results['pgen_loss']\n",
        "        if FLAGS.coverage:\n",
        "          printer_helper['coverage_loss'] = results['coverage_loss']\n",
        "          if FLAGS.rl_training or FLAGS.ac_training:\n",
        "            printer_helper['rl_cov_total_loss']= results['reinforce_cov_total_loss']\n",
        "          loss = printer_helper['pointer_cov_total_loss'] = results['pointer_cov_total_loss']\n",
        "        if FLAGS.rl_training or FLAGS.ac_training:\n",
        "          printer_helper['shared_loss'] = results['shared_loss']\n",
        "          printer_helper['rl_loss'] = results['rl_loss']\n",
        "          printer_helper['rl_avg_logprobs'] = results['rl_avg_logprobs']\n",
        "        if FLAGS.rl_training:\n",
        "          printer_helper['sampled_r'] = np.mean(results['sampled_sentence_r_values'])\n",
        "          printer_helper['greedy_r'] = np.mean(results['greedy_sentence_r_values'])\n",
        "          printer_helper['r_diff'] = printer_helper['greedy_r'] - printer_helper['sampled_r']\n",
        "        if FLAGS.ac_training:\n",
        "          printer_helper['dqn_loss'] = np.mean(self.avg_dqn_loss) if len(self.avg_dqn_loss) > 0 else 0\n",
        "\n",
        "        for (k,v) in printer_helper.items():\n",
        "          if not np.isfinite(v):\n",
        "            raise Exception(\"{} is not finite. Stopping.\".format(k))\n",
        "          tf.logging.info('{}: {}\\t'.format(k,v))\n",
        "\n",
        "        # add summaries\n",
        "        summaries = results['summaries']\n",
        "        train_step = results['global_step']\n",
        "        self.summary_writer.add_summary(summaries, train_step)\n",
        "\n",
        "        # calculate running avg loss\n",
        "        avg_losses.append(self.calc_running_avg_loss(np.asscalar(loss), running_avg_loss, train_step))\n",
        "        tf.logging.info('-------------------------------------------')\n",
        "\n",
        "      running_avg_loss = np.mean(avg_losses)\n",
        "      tf.logging.info('==========================================')\n",
        "      tf.logging.info('best_loss: {}\\trunning_avg_loss: {}\\t'.format(best_loss, running_avg_loss))\n",
        "      tf.logging.info('==========================================')\n",
        "\n",
        "      # If running_avg_loss is best so far, save this checkpoint (early stopping).\n",
        "      # These checkpoints will appear as bestmodel-<iteration_number> in the eval dir\n",
        "      if best_loss is None or running_avg_loss < best_loss:\n",
        "        tf.logging.info('Found new best model with %.3f running_avg_loss. Saving to %s', running_avg_loss, bestmodel_save_path)\n",
        "        saver.save(sess, bestmodel_save_path, global_step=train_step, latest_filename='checkpoint_best')\n",
        "        best_loss = running_avg_loss\n",
        "\n",
        "      # flush the summary writer every so often\n",
        "      if train_step % 100 == 0:\n",
        "        self.summary_writer.flush()\n",
        "      #time.sleep(600) # run eval every 10 minute\n",
        "\n",
        "  def main(self):\n",
        "    \"\"\"Entry point: build the vocab, batcher and model, then dispatch on FLAGS.mode.\n",
        "\n",
        "    Expects the global FLAGS object to be fully populated by the caller.\n",
        "    Runs training, evaluation or beam-search decoding depending on FLAGS.mode.\n",
        "    \"\"\"\n",
        "    #if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly\n",
        "    #  raise Exception(\"Problem with flags: %s\" % unused_argv)\n",
        "\n",
        "    tf.reset_default_graph()\n",
        "\n",
        "    #FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)\n",
        "    tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want\n",
        "    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))\n",
        "\n",
        "    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary\n",
        "    #flags = getattr(FLAGS,\"__flags\")\n",
        "\n",
        "    if not os.path.exists(FLAGS.log_root):\n",
        "      if FLAGS.mode==\"train\":\n",
        "        os.makedirs(FLAGS.log_root)\n",
        "      else:\n",
        "        raise Exception(\"Logdir %s doesn't exist. Run in train mode to create it.\" % (FLAGS.log_root))\n",
        "\n",
        "    # Create/truncate config.txt. The flag dump below is currently disabled;\n",
        "    # the context manager fixes the file handle that was previously opened and never closed.\n",
        "    with open('{}/config.txt'.format(FLAGS.log_root), 'w') as fw:\n",
        "      pass\n",
        "      #for k, v in flags.items():\n",
        "      #  fw.write('{}\\t{}\\n'.format(k, v))\n",
        "\n",
        "    self.vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary\n",
        "\n",
        "    # If in decode mode, set batch_size = beam_size\n",
        "    # Reason: in decode mode, we decode one example at a time.\n",
        "    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.\n",
        "    if FLAGS.mode == 'decode':\n",
        "      FLAGS.batch_size = FLAGS.beam_size\n",
        "\n",
        "    # If single_pass=True, check we're in decode mode\n",
        "    if FLAGS.single_pass and FLAGS.mode!='decode':\n",
        "      raise Exception(\"The single_pass flag should only be True in decode mode\")\n",
        "\n",
        "    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs\n",
        "    # NOTE(review): hparam_list is currently unused — hps_dict below copies every FLAGS member.\n",
        "    hparam_list = ['mode', 'lr', 'gpu_num',\n",
        "    #'sampled_greedy_flag', \n",
        "    'gamma', 'eta', \n",
        "    'fixed_eta', 'reward_function', 'intradecoder', \n",
        "    'use_temporal_attention', 'ac_training','rl_training', 'matrix_attention', 'calculate_true_q',\n",
        "    'enc_hidden_dim', 'dec_hidden_dim', 'k', \n",
        "    'scheduled_sampling', 'sampling_probability','fixed_sampling_probability',\n",
        "    'alpha', 'hard_argmax', 'greedy_scheduled_sampling',\n",
        "    'adagrad_init_acc', 'rand_unif_init_mag', \n",
        "    'trunc_norm_init_std', 'max_grad_norm', \n",
        "    'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps',\n",
        "    'dqn_scheduled_sampling', 'dqn_sleep_time', 'E2EBackProp',\n",
        "    'coverage', 'cov_loss_wt', 'pointer_gen']\n",
        "    hps_dict = {}\n",
        "\n",
        "    # Copy every non-callable, non-dunder FLAGS attribute into the hyperparameter dict.\n",
        "    flag_members = [attr for attr in dir(FLAGS) if not callable(getattr(FLAGS, attr)) and not attr.startswith(\"__\")]\n",
        "    for m in flag_members:\n",
        "        hps_dict[m] = getattr(FLAGS, m)\n",
        "\n",
        "    if FLAGS.ac_training:\n",
        "      hps_dict.update({'dqn_input_feature_len':(FLAGS.dec_hidden_dim)})\n",
        "    self.hps = namedtuple(\"HParams\", hps_dict.keys())(**hps_dict)\n",
        "    # creating all the required parameters for DDQN model.\n",
        "    if FLAGS.ac_training:\n",
        "      hparam_list = ['lr', 'dqn_gpu_num', \n",
        "      'dqn_layers', \n",
        "      'dqn_replay_buffer_size', \n",
        "      'dqn_batch_size', \n",
        "      'dqn_target_update',\n",
        "      'dueling_net',\n",
        "      'dqn_polyak_averaging',\n",
        "      'dqn_sleep_time',\n",
        "      'dqn_scheduled_sampling',\n",
        "      'max_grad_norm']\n",
        "      hps_dict = {}\n",
        "\n",
        "      flag_members = [attr for attr in dir(FLAGS) if not callable(getattr(FLAGS, attr)) and not attr.startswith(\"__\")]\n",
        "      for m in flag_members:\n",
        "        hps_dict[m] = getattr(FLAGS, m)\n",
        "\n",
        "      hps_dict.update({'dqn_input_feature_len':(FLAGS.dec_hidden_dim)})\n",
        "      hps_dict.update({'vocab_size':self.vocab.size()})\n",
        "      self.dqn_hps = namedtuple(\"HParams\", hps_dict.keys())(**hps_dict)\n",
        "\n",
        "    # Create a batcher object that will create minibatches of data\n",
        "    self.batcher = Batcher(FLAGS.data_path, FLAGS.csv, self.vocab, self.hps, single_pass=FLAGS.single_pass, decode_after=FLAGS.decode_after)\n",
        "\n",
        "    tf.set_random_seed(111) # a seed value for randomness\n",
        "\n",
        "    if self.hps.mode == 'train':\n",
        "      print(\"creating model...\")\n",
        "      self.model = SummarizationModel(self.hps, self.vocab)\n",
        "      if FLAGS.ac_training:\n",
        "        # current DQN with parameters \\Psi\n",
        "        self.dqn = DQN(self.dqn_hps,'current')\n",
        "        # target DQN with parameters \\Psi^{\\prime}\n",
        "        self.dqn_target = DQN(self.dqn_hps,'target')\n",
        "      self.setup_training()\n",
        "    elif self.hps.mode == 'eval':\n",
        "      self.model = SummarizationModel(self.hps, self.vocab)\n",
        "      if FLAGS.ac_training:\n",
        "        self.dqn = DQN(self.dqn_hps,'current')\n",
        "        self.dqn_target = DQN(self.dqn_hps,'target')\n",
        "      self.run_eval()\n",
        "    elif self.hps.mode == 'decode':\n",
        "      # The decoder model runs one step at a time (beam search), so max_dec_steps=1.\n",
        "      # The batcher keeps the full max_dec_steps so batches contain complete summaries.\n",
        "      # (Fix: removed a dead `decode_model_hps = self.hps` assignment that was immediately overwritten.)\n",
        "      decode_model_hps = self.hps._replace(max_dec_steps=1)\n",
        "      model = SummarizationModel(decode_model_hps, self.vocab)\n",
        "      if FLAGS.ac_training:\n",
        "        # We need our target DDQN network for collecting Q-estimation at each decoder step.\n",
        "        dqn_target = DQN(self.dqn_hps,'target')\n",
        "      else:\n",
        "        dqn_target = None\n",
        "      decoder = BeamSearchDecoder(model, self.batcher, self.vocab, dqn = dqn_target)\n",
        "      decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)\n",
        "    else:\n",
        "      raise ValueError(\"The 'mode' flag must be one of train/eval/decode\")\n",
        "\n",
        "  # Scheduled sampling used for either selecting true Q-estimates or the DDQN estimation\n",
        "  # based on https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/ScheduledEmbeddingTrainingHelper\n",
        "  def scheduled_sampling(self, batch_size, sampling_probability, true, estimate):\n",
        "    \"\"\"Per-example scheduled sampling between ground truth and an estimate.\n",
        "\n",
        "    For each of the `batch_size` examples, with probability\n",
        "    `sampling_probability` the row from `estimate` is selected; otherwise the\n",
        "    row from `true` is kept.\n",
        "\n",
        "    Args:\n",
        "      batch_size: number of examples in the batch.\n",
        "      sampling_probability: Bernoulli probability of picking `estimate`.\n",
        "      true: tensor of ground-truth values (first dimension is the batch).\n",
        "      estimate: tensor of estimated values, same shape as `true`.\n",
        "\n",
        "    Returns:\n",
        "      A tensor shaped like `true`, mixing rows from both inputs.\n",
        "    \"\"\"\n",
        "    with variable_scope.variable_scope(\"ScheduledEmbedding\"):\n",
        "      # Return -1s where we do not sample, and sample_ids elsewhere\n",
        "      select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)\n",
        "      select_sample = select_sampler.sample(sample_shape=batch_size)\n",
        "      sample_ids = array_ops.where(\n",
        "                  select_sample,\n",
        "                  tf.range(batch_size),\n",
        "                  gen_array_ops.fill([batch_size], -1))\n",
        "      # Split batch indices into the sampled (from estimate) and\n",
        "      # non-sampled (from true) disjoint sets.\n",
        "      where_sampling = math_ops.cast(\n",
        "          array_ops.where(sample_ids > -1), tf.int32)\n",
        "      where_not_sampling = math_ops.cast(\n",
        "          array_ops.where(sample_ids <= -1), tf.int32)\n",
        "      _estimate = array_ops.gather_nd(estimate, where_sampling)\n",
        "      _true = array_ops.gather_nd(true, where_not_sampling)\n",
        "\n",
        "      # Scatter both row sets back into full-size tensors and merge them.\n",
        "      # (Fix: removed the dead `result` local that duplicated this sum.)\n",
        "      base_shape = array_ops.shape(true)\n",
        "      result1 = array_ops.scatter_nd(indices=where_sampling, updates=_estimate, shape=base_shape)\n",
        "      result2 = array_ops.scatter_nd(indices=where_not_sampling, updates=_true, shape=base_shape)\n",
        "      return result1 + result2\n",
        "\n",
        "def main():\n",
        "  \"\"\"Construct the Seq2Seq driver and run its entry point.\"\"\"\n",
        "  Seq2Seq().main()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "yn3MxAybHj68",
        "colab_type": "text"
      },
      "source": [
        "## Train"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KbiV0YM0O0MO",
        "colab_type": "text"
      },
      "source": [
        "### Build dict"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_2_-bx_XXEaS",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "from nltk.tokenize import word_tokenize\n",
        "import collections\n",
        "\n",
        "def build_dict(train_article_list,VOCAB_SIZE):\n",
        "    \"\"\"Count word frequencies over the articles and write the vocab file.\n",
        "\n",
        "    Args:\n",
        "      train_article_list: iterable of article strings to tokenize.\n",
        "      VOCAB_SIZE: maximum number of (word, count) entries to write.\n",
        "\n",
        "    Writes one 'word count' pair per line to <pickle_path>/vocab.\n",
        "    \"\"\"\n",
        "    vocab_counter = collections.Counter()\n",
        "\n",
        "    progress = ProgressBar(len(train_article_list), fmt=ProgressBar.FULL)\n",
        "    for sentence in train_article_list:\n",
        "        # word_tokenize already returns a list, so feed the counter directly\n",
        "        # instead of rebuilding the list with a manual append loop.\n",
        "        vocab_counter.update(word_tokenize(sentence))\n",
        "        progress.current += 1\n",
        "        progress()\n",
        "    progress.done()\n",
        "\n",
        "    print(\"Writing vocab file...\")\n",
        "    with open(os.path.join(pickle_path, \"vocab\"), 'w', encoding=\"utf-8\") as writer:\n",
        "      for word, count in vocab_counter.most_common(VOCAB_SIZE):\n",
        "        writer.write(word + ' ' + str(count) + '\\n')\n",
        "    print(\"Finished writing vocab file\")\n",
        "    return"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0WnTaVf6jQwy",
        "colab_type": "code",
        "outputId": "af7a7169-ef77-42ab-f752-07aa96e86c0c",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 204
        }
      },
      "source": [
        "import pandas as pd\n",
        "\n",
        "# Load the news CSV, drop rows with missing values, and re-index so row\n",
        "# positions match downstream slicing. The original bare `reviews.shape` and\n",
        "# `reviews.isnull().sum()` expressions were no-ops (their values were\n",
        "# discarded; only a cell's last expression is displayed), so they are removed.\n",
        "reviews = pd.read_csv(default_path + \"HindiNewsBook.csv\")\n",
        "reviews = reviews.dropna().reset_index(drop=True)\n",
        "reviews.head()"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "<div>\n",
              "<style scoped>\n",
              "    .dataframe tbody tr th:only-of-type {\n",
              "        vertical-align: middle;\n",
              "    }\n",
              "\n",
              "    .dataframe tbody tr th {\n",
              "        vertical-align: top;\n",
              "    }\n",
              "\n",
              "    .dataframe thead th {\n",
              "        text-align: right;\n",
              "    }\n",
              "</style>\n",
              "<table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              "    <tr style=\"text-align: right;\">\n",
              "      <th></th>\n",
              "      <th>सेtext</th>\n",
              "      <th>title</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <th>0</th>\n",
              "      <td>पाकिस्तान से खबरें संगीन रंगीन हैं। पापा जरदार...</td>\n",
              "      <td>बिलावल-हिना का खूबसूरत घोटाला</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>1</th>\n",
              "      <td>इन 5 बातों का हर भाई रखे ख्याल, नहीं तो रिश्तो...</td>\n",
              "      <td>Read useful articles about relationships from ...</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>2</th>\n",
              "      <td>छत्तीसगढ़ में इंसानियत को शर्मसार कर देने वाली...</td>\n",
              "      <td>जमीनी विवाद में भाभी और भतीजी को उतारा मौत के घाट</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>3</th>\n",
              "      <td>एनसीसी की गर्ल कैडेट को पोर्न वीडियो भेजने पर ...</td>\n",
              "      <td>NCC गर्ल कैडेट को पोर्न क्लिप भेजता था मेजर जन...</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>4</th>\n",
              "      <td>देश, दुनिया, खेल, बिजनेस और बॉलीवुड में क्‍या ...</td>\n",
              "      <td>Breaking News:एक क्लिक में पढ़ें गुरुवार दिनभर...</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table>\n",
              "</div>"
            ],
            "text/plain": [
              "                                              सेtext                                              title\n",
              "0  पाकिस्तान से खबरें संगीन रंगीन हैं। पापा जरदार...                      बिलावल-हिना का खूबसूरत घोटाला\n",
              "1  इन 5 बातों का हर भाई रखे ख्याल, नहीं तो रिश्तो...  Read useful articles about relationships from ...\n",
              "2  छत्तीसगढ़ में इंसानियत को शर्मसार कर देने वाली...  जमीनी विवाद में भाभी और भतीजी को उतारा मौत के घाट\n",
              "3  एनसीसी की गर्ल कैडेट को पोर्न वीडियो भेजने पर ...  NCC गर्ल कैडेट को पोर्न क्लिप भेजता था मेजर जन...\n",
              "4  देश, दुनिया, खेल, बिजनेस और बॉलीवुड में क्‍या ...  Breaking News:एक क्लिक में पढ़ें गुरुवार दिनभर..."
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 22
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "oILyfJvtOKBX",
        "colab_type": "code",
        "outputId": "1aba24e5-5d85-4c0c-f68b-6094ecc0fe23",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "Text = []\n",
        "Summary = []\n",
        "\n",
        "progress = ProgressBar(len(reviews), fmt=ProgressBar.FULL)\n",
        "\n",
        "# Collect every article body and headline, advancing the progress bar per row.\n",
        "for _, review_row in reviews.iterrows():\n",
        "  Text.append(review_row[\"सेtext\"])\n",
        "  Summary.append(review_row[\"title\"])\n",
        "  progress.current += 1\n",
        "  progress()\n",
        "progress.done()"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "[========================================] 57277/57277 (100%)     0 to go\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "d_uuQ4DQbLLj",
        "colab_type": "code",
        "outputId": "12e9fa47-8b7c-4dbb-9ed3-20e1bb5d1522",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 68
        }
      },
      "source": [
        "# Build the vocabulary file from the article text column (top 200,000 entries).\n",
        "build_dict(reviews.सेtext,200000)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "[========================================] 57277/57277 (100%)     0 to go\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "Writing vocab file...\n",
            "Finished writing vocab file\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_lA1gzm7P-Ly",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Drop references to the large intermediate objects so the garbage\n",
        "# collector can reclaim their memory before training starts.\n",
        "df= None\n",
        "Text= None\n",
        "Summary= None\n",
        "Data= None"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Ww6wYED7O5Z0",
        "colab_type": "text"
      },
      "source": [
        "### Train"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KoSZqBs7P5Pe",
        "colab_type": "code",
        "outputId": "4c73a62d-f866-4117-d86f-9ee242866134",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 204
        }
      },
      "source": [
        "import pandas as pd\n",
        "\n",
        "# Reload the news CSV for training: drop rows with missing values and\n",
        "# re-index so positional slices (e.g. reviews[1000:]) are contiguous. The\n",
        "# original bare `reviews.shape` and `reviews.isnull().sum()` expressions were\n",
        "# no-ops (values discarded; only the last expression displays), so removed.\n",
        "reviews = pd.read_csv(default_path + \"HindiNewsBook.csv\")\n",
        "reviews = reviews.dropna().reset_index(drop=True)\n",
        "reviews.head()"
      ],
      "execution_count": 19,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "<div>\n",
              "<style scoped>\n",
              "    .dataframe tbody tr th:only-of-type {\n",
              "        vertical-align: middle;\n",
              "    }\n",
              "\n",
              "    .dataframe tbody tr th {\n",
              "        vertical-align: top;\n",
              "    }\n",
              "\n",
              "    .dataframe thead th {\n",
              "        text-align: right;\n",
              "    }\n",
              "</style>\n",
              "<table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              "    <tr style=\"text-align: right;\">\n",
              "      <th></th>\n",
              "      <th>सेtext</th>\n",
              "      <th>title</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <th>0</th>\n",
              "      <td>पाकिस्तान से खबरें संगीन रंगीन हैं। पापा जरदार...</td>\n",
              "      <td>बिलावल-हिना का खूबसूरत घोटाला</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>1</th>\n",
              "      <td>इन 5 बातों का हर भाई रखे ख्याल, नहीं तो रिश्तो...</td>\n",
              "      <td>Read useful articles about relationships from ...</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>2</th>\n",
              "      <td>छत्तीसगढ़ में इंसानियत को शर्मसार कर देने वाली...</td>\n",
              "      <td>जमीनी विवाद में भाभी और भतीजी को उतारा मौत के घाट</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>3</th>\n",
              "      <td>एनसीसी की गर्ल कैडेट को पोर्न वीडियो भेजने पर ...</td>\n",
              "      <td>NCC गर्ल कैडेट को पोर्न क्लिप भेजता था मेजर जन...</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>4</th>\n",
              "      <td>देश, दुनिया, खेल, बिजनेस और बॉलीवुड में क्‍या ...</td>\n",
              "      <td>Breaking News:एक क्लिक में पढ़ें गुरुवार दिनभर...</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table>\n",
              "</div>"
            ],
            "text/plain": [
              "                                              सेtext                                              title\n",
              "0  पाकिस्तान से खबरें संगीन रंगीन हैं। पापा जरदार...                      बिलावल-हिना का खूबसूरत घोटाला\n",
              "1  इन 5 बातों का हर भाई रखे ख्याल, नहीं तो रिश्तो...  Read useful articles about relationships from ...\n",
              "2  छत्तीसगढ़ में इंसानियत को शर्मसार कर देने वाली...  जमीनी विवाद में भाभी और भतीजी को उतारा मौत के घाट\n",
              "3  एनसीसी की गर्ल कैडेट को पोर्न वीडियो भेजने पर ...  NCC गर्ल कैडेट को पोर्न क्लिप भेजता था मेजर जन...\n",
              "4  देश, दुनिया, खेल, बिजनेस और बॉलीवुड में क्‍या ...  Breaking News:एक क्लिक में पढ़ें गुरुवार दिनभर..."
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 19
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uz_50XkKdQWw",
        "colab_type": "code",
        "outputId": "c66fad0d-0571-4751-bf8c-65ac6d559887",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "#import sys\n",
        "#reload(sys)\n",
        "#sys.setdefaultencoding('utf-8')\n",
        "\n",
        "class flags_:\n",
        "  \"\"\"Empty namespace standing in for TensorFlow's FLAGS container; attributes are assigned below.\"\"\"\n",
        "  pass\n",
        "FLAGS = flags_()  # global flag object read by Seq2Seq.main()\n",
        "\n",
        "article = []\n",
        "reference = []\n",
        "summary = []\n",
        "\n",
        "default_path = \"drive/My Drive/Hindi_News/\"\n",
        "data_path = \"drive/My Drive/Colab Notebooks/Model 4_5/arabic_finished_files_200k_correct/\" #not used\n",
        "\n",
        "# Where to find data\n",
        "FLAGS.data_path = data_path + 'chunked/train_*' #not used\n",
        "FLAGS.vocab_path = default_path + 'pickles/vocab'\n",
        "\n",
        "# Important settings\n",
        "FLAGS.mode = 'train' \n",
        "FLAGS.single_pass = False  #true --> decode      false--> train\n",
        "FLAGS.decode_after = 0\n",
        "FLAGS.decode_from = 'train'\n",
        "\n",
        "if FLAGS.mode =='train':\n",
        "  FLAGS.csv = reviews[1000:] #train\n",
        "elif FLAGS.mode =='decode':\n",
        "  FLAGS.csv = reviews[:1000] #test #reviews[-1000:] #test\n",
        "  \n",
        "# Where to save output\n",
        "FLAGS.log_root = default_path +'logs_6_12'  \n",
        "FLAGS.exp_name = 'scheduled-sampling-hardargmax-greedy'\n",
        "\n",
        "# batcher parameter#, for consistent results#, set all these parameters to 1\n",
        "FLAGS.example_queue_threads = 4\n",
        "FLAGS.batch_queue_threads   = 2\n",
        "FLAGS.bucketing_cache_size  = 100\n",
        "\n",
        "# Hyperparameters\n",
        "\n",
        "FLAGS.enc_hidden_dim= 256##, 'dimension of RNN hidden states')\n",
        "FLAGS.dec_hidden_dim= 256##, 'dimension of RNN hidden states')\n",
        "FLAGS.emb_dim= 150 # 'dimension of word embeddings')\n",
        "FLAGS.batch_size=20# , 'minibatch size')\n",
        "FLAGS.max_enc_steps= 400 #100#, 'max timesteps of encoder (max source text tokens)')\n",
        "FLAGS.max_dec_steps= 15 #20#, 'max timesteps of decoder (max summary tokens)')\n",
        "FLAGS.beam_size= 35##, 'beam size for beam search decoding.')\n",
        "FLAGS.min_dec_steps= 20 #20##, 'Minimum sequence length of generated summary. Applies only for beam search decoding mode')\n",
        "FLAGS.max_iter= 20000  #40000##, 'max number of iterations')\n",
        "FLAGS.vocab_size= 50000##, 'Size of vocabulary. These will be read from the vocabulary file in order. If the vocabulary file contains fewer words than this number#, or if this number is set to 0#, will take all words in the vocabulary file.')\n",
        "FLAGS.lr= 0.15##, 'learning rate')\n",
        "FLAGS.adagrad_init_acc= 0.1##, 'initial accumulator value for Adagrad')\n",
        "FLAGS.rand_unif_init_mag= 0.02##, 'magnitude for lstm cells random uniform inititalization')\n",
        "FLAGS.trunc_norm_init_std= 1e-4##, 'std of trunc norm init#, used for initializing everything else')\n",
        "FLAGS.max_grad_norm= 5.0##, 'for gradient clipping')\n",
        "FLAGS.embedding= default_path +\"model_hindi.model\" #False#None##, 'path to the pre-trained embedding file')\n",
        "FLAGS.gpu_num= 0##, 'which gpu to use to train the model')\n",
        "\n",
        "# Pointer-generator or baseline model\n",
        "FLAGS.pointer_gen= True##, 'If True#, use pointer-generator model. If False#, use baseline model.')\n",
        "FLAGS.avoid_trigrams= True##, 'Avoids trigram during decoding')\n",
        "FLAGS.share_decoder_weights= False##, 'Share output matrix projection with word embedding') # Eq 13. in https://arxiv.org/pdf/1705.04304.pdf\n",
        "\n",
        "# Pointer-generator with Self-Critic policy gradient: https://arxiv.org/pdf/1705.04304.pdf\n",
        "FLAGS.rl_training= False #True#, 'Use policy-gradient training by collecting rewards at the end of sequence.')\n",
        "FLAGS.self_critic= True#, 'Uses greedy sentence reward as baseline.')\n",
        "FLAGS.use_discounted_rewards= False#, 'Whether to use discounted rewards.')\n",
        "FLAGS.use_intermediate_rewards= False#, 'Whether to use intermediate rewards.')\n",
        "FLAGS.convert_to_reinforce_model= False #True#, 'Convert a pointer model to a reinforce model. Turn this on and run in train mode.\n",
        "#Your current training model will be copied to a new version (same name with _cov_init appended) \n",
        "#that will be ready to run with coverage flag turned on#, for the coverage training stage.')\n",
        "FLAGS.intradecoder= True#, #%# 'Use intradecoder attention or not')\n",
        "FLAGS.use_temporal_attention=  True# #%#, 'Whether to use temporal attention or not')\n",
        "FLAGS.matrix_attention= False#, 'Use matrix attention#, Eq. 2 https://arxiv.org/pdf/1705.04304.pdf')\n",
        "FLAGS.eta= 2.5E-05#, 'RL/MLE scaling factor#, 1 means use RL loss#, 0 means use MLE loss')\n",
        "FLAGS.fixed_eta= False#, 'Use fixed value for eta or adaptive based on global step')\n",
        "FLAGS.gamma= 0.99#, 'discount factor')\n",
        "FLAGS.reward_function= 'rouge_l/f_score'#, 'either bleu or one of the rouge measures (rouge_1/f_score#,rouge_2/f_score#,rouge_l/f_score)')\n",
        "\n",
        "# parameters of DDQN model\n",
        "FLAGS.ac_training= False#, 'Use Actor-Critic learning by DDQN.')\n",
        "FLAGS.dqn_scheduled_sampling= True #, 'Whether to use scheduled sampling to use estimates of dqn model vs the actual q-estimates values')\n",
        "FLAGS.dqn_layers= '512,256,128'#, 'DQN dense hidden layer size#, will create three dense layers with 512#, 256#, and 128 size')\n",
        "FLAGS.dqn_replay_buffer_size= 100000#, 'Size of the replay buffer')\n",
        "FLAGS.dqn_batch_size= 100#, 'Batch size for training the DDQN model')\n",
        "FLAGS.dqn_target_update= 10000#, 'Update target Q network every 10000 steps')\n",
        "FLAGS.dqn_sleep_time= 2#, 'Train DDQN model every 2 seconds')\n",
        "FLAGS.dqn_gpu_num= 0#, 'GPU number to train the DDQN')\n",
        "FLAGS.dueling_net= True#, 'Whether to use Duelling Network to train the model') # https://arxiv.org/pdf/1511.06581.pdf\n",
        "FLAGS.dqn_polyak_averaging= True#, 'Whether to use polyak averaging to update the target network parameters')\n",
        "FLAGS.calculate_true_q= False#, \"Whether to use true Q-values to train DQN or use DQN's estimates to train it\")\n",
        "FLAGS.dqn_pretrain= False#, \"Pretrain the DDQN network with fixed Actor model\")\n",
        "FLAGS.dqn_pretrain_steps= 10000#, 'Number of steps to pre-train the DDQN')\n",
        "\n",
        "#scheduled sampling parameters#, https://arxiv.org/pdf/1506.03099.pdf\n",
        "# At each time step t and for each sequence in the batch#, we get the input to next decoding step by either\n",
        "#   (1) sampling from the final distribution at (t-1)#, or\n",
        "#   (2) reading from input_decoder_embedding.\n",
        "# We do (1) with probability sampling_probability and (2) with 1 - sampling_probability.\n",
        "# Using sampling_probability=0.0 is equivalent to using only the ground truth data (no sampling).\n",
        "# Using sampling_probability=1.0 is equivalent to doing inference by only relying on the sampled token generated at each decoding step\n",
        "FLAGS.scheduled_sampling= True#, 'whether to do scheduled sampling or not')\n",
        "FLAGS.decay_function= 'linear'#,'linear#, exponential#, inv_sigmoid') #### TODO: implement this\n",
        "FLAGS.sampling_probability= 2.5E-05#, 'epsilon value for choosing ground-truth or model output')\n",
        "FLAGS.fixed_sampling_probability= False#, 'Whether to use fixed sampling probability or adaptive based on global step')\n",
        "FLAGS.hard_argmax= True#, 'Whether to use soft argmax or hard argmax')\n",
        "FLAGS.greedy_scheduled_sampling= True#, 'Whether to use greedy approach or sample for the output#, if True it uses greedy')\n",
        "FLAGS.E2EBackProp= False#, 'Whether to use E2EBackProp algorithm to solve exposure bias')\n",
        "FLAGS.alpha= 1#, 'soft argmax argument')\n",
        "FLAGS.k= 1#, 'number of samples')\n",
        "FLAGS.scheduled_sampling_final_dist= True#, 'Whether to use final distribution or vocab distribution for scheduled sampling')\n",
        "\n",
        "# Coverage hyperparameters\n",
        "FLAGS.coverage= False#, 'Use coverage mechanism. Note#, the experiments reported in the ACL paper train WITHOUT coverage until converged#, and then train for a short phase WITH coverage afterwards. i.e. to reproduce the results in the ACL paper#, turn this off for most of training then turn on for a short phase at the end.')\n",
        "FLAGS.cov_loss_wt= 1.0#, 'Weight of coverage loss (lambda in the paper). If zero#, then no incentive to minimize coverage loss.')\n",
        "\n",
        "# Utility flags#, for restoring and changing checkpoints\n",
        "FLAGS.convert_to_coverage_model= False#, 'Convert a non-coverage model to a coverage model. Turn this on and run in train mode. Your current training model will be copied to a new version (same name with _cov_init appended) that will be ready to run with coverage flag turned on#, for the coverage training stage.')\n",
        "FLAGS.restore_best_model= False#, 'Restore the best model in the eval/ dir and save it in the train/ dir#, ready to be used for further training. Useful for early stopping#, or if your training checkpoint has become corrupted with e.g. NaN values.')\n",
        "\n",
        "# Debugging. See https://www.tensorflow.org/programmers_guide/debugger\n",
        "FLAGS.debug= False#, \"Run in tensorflow's debug mode (watches for NaN/inf values)\")\n",
        "\n",
        "seq2seq = Seq2Seq()\n",
        "seq2seq.main()"
      ],
      "execution_count": 21,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Starting seq2seq_attention in decode mode...\n",
            "Duplicate: by\n",
            "Duplicate: a\n",
            "Duplicate: the\n",
            "Duplicate: we\n",
            "Duplicate: it\n",
            "Duplicate: video\n",
            "Duplicate: this\n",
            "Duplicate: up\n",
            "Duplicate: my\n",
            "Duplicate: live\n",
            "Duplicate: am\n",
            "Duplicate: day\n",
            "Duplicate: vs\n",
            "Duplicate: now\n",
            "Duplicate: in\n",
            "Duplicate: he\n",
            "Duplicate: news\n",
            "Duplicate: all\n",
            "Duplicate: you\n",
            "Duplicate: world\n",
            "Duplicate: new\n",
            "Duplicate: hai\n",
            "Duplicate: jha\n",
            "Duplicate: police\n",
            "Duplicate: and\n",
            "Duplicate: no\n",
            "Duplicate: what\n",
            "Duplicate: ind\n",
            "Duplicate: t\n",
            "Duplicate: mi\n",
            "Duplicate: one\n",
            "Duplicate: to\n",
            "Duplicate: video\n",
            "Duplicate: today\n",
            "Duplicate: if\n",
            "Duplicate: of\n",
            "Duplicate: on\n",
            "Duplicate: so\n",
            "Duplicate: there\n",
            "Duplicate: ji\n",
            "Duplicate: happy\n",
            "Duplicate: for\n",
            "Duplicate: they\n",
            "Duplicate: us\n",
            "Duplicate: here\n",
            "Duplicate: i\n",
            "Duplicate: love\n",
            "Duplicate: with\n",
            "Duplicate: will\n",
            "Duplicate: live\n",
            "Duplicate: party\n",
            "Duplicate: birthday\n",
            "Duplicate: s\n",
            "Duplicate: govt\n",
            "Duplicate: government\n",
            "Duplicate: but\n",
            "Duplicate: thank\n",
            "Duplicate: in\n",
            "Duplicate: first\n",
            "Duplicate: when\n",
            "Duplicate: u\n",
            "Duplicate: office\n",
            "Duplicate: watch\n",
            "Duplicate: is\n",
            "Duplicate: total\n",
            "Duplicate: elections\n",
            "Duplicate: case\n",
            "Duplicate: she\n",
            "Duplicate: as\n",
            "Duplicate: x\n",
            "Duplicate: cricket\n",
            "Duplicate: our\n",
            "Duplicate: special\n",
            "Duplicate: m\n",
            "Duplicate: court\n",
            "Duplicate: now\n",
            "Duplicate: watch\n",
            "Duplicate: after\n",
            "Duplicate: an\n",
            "Duplicate: update\n",
            "Duplicate: team\n",
            "Duplicate: sir\n",
            "Duplicate: mishra\n",
            "Duplicate: more\n",
            "Duplicate: assembly\n",
            "Duplicate: photo\n",
            "Duplicate: election\n",
            "Duplicate: people\n",
            "Duplicate: pm\n",
            "Duplicate: weekend\n",
            "Duplicate: d\n",
            "Duplicate: best\n",
            "Duplicate: attack\n",
            "Duplicate: result\n",
            "Duplicate: your\n",
            "Duplicate: movie\n",
            "Duplicate: state\n",
            "Duplicate: how\n",
            "Duplicate: from\n",
            "Duplicate: why\n",
            "Duplicate: not\n",
            "Duplicate: se\n",
            "Duplicate: home\n",
            "Duplicate: it\n",
            "Duplicate: let\n",
            "Duplicate: can\n",
            "Duplicate: updates\n",
            "Duplicate: film\n",
            "Duplicate: welcome\n",
            "Duplicate: week\n",
            "Duplicate: beingsalmankhan\n",
            "Duplicate: oppo\n",
            "Duplicate: national\n",
            "Duplicate: please\n",
            "Duplicate: do\n",
            "Duplicate: posted\n",
            "Duplicate: pic\n",
            "Duplicate: results\n",
            "Duplicate: women\n",
            "Duplicate: get\n",
            "Duplicate: former\n",
            "Duplicate: house\n",
            "Duplicate: good\n",
            "Duplicate: verdict\n",
            "Duplicate: two\n",
            "Duplicate: media\n",
            "Duplicate: full\n",
            "Duplicate: pics\n",
            "Duplicate: security\n",
            "Duplicate: r\n",
            "Duplicate: n\n",
            "Duplicate: biz\n",
            "Duplicate: rs\n",
            "Duplicate: match\n",
            "Duplicate: sanjay\n",
            "Duplicate: that\n",
            "Duplicate: file\n",
            "Duplicate: act\n",
            "Duplicate: may\n",
            "Duplicate: top\n",
            "Duplicate: 6s\n",
            "Duplicate: kashyap\n",
            "Duplicate: savern\n",
            "Duplicate: v\n",
            "Duplicate: kya\n",
            "Duplicate: his\n",
            "Duplicate: test\n",
            "Duplicate: great\n",
            "Duplicate: me\n",
            "Duplicate: pak\n",
            "Duplicate: list\n",
            "Duplicate: final\n",
            "Duplicate: look\n",
            "Duplicate: vs\n",
            "Duplicate: xs\n",
            "Duplicate: kumar\n",
            "Duplicate: road\n",
            "Duplicate: official\n",
            "Duplicate: high\n",
            "Duplicate: year\n",
            "Duplicate: justice\n",
            "Duplicate: visuals\n",
            "Duplicate: news\n",
            "Duplicate: hospital\n",
            "Duplicate: song\n",
            "Duplicate: india\n",
            "Duplicate: l\n",
            "Duplicate: story\n",
            "Duplicate: just\n",
            "Duplicate: tax\n",
            "Duplicate: manoj\n",
            "Duplicate: see\n",
            "Duplicate: has\n",
            "Duplicate: exclusive\n",
            "Duplicate: play\n",
            "Duplicate: amazon\n",
            "Duplicate: go\n",
            "Duplicate: mother\n",
            "Duplicate: investigation\n",
            "Duplicate: at\n",
            "Duplicate: have\n",
            "Duplicate: read\n",
            "Duplicate: air\n",
            "Duplicate: another\n",
            "Duplicate: hope\n",
            "Duplicate: district\n",
            "Duplicate: ka\n",
            "Duplicate: films\n",
            "Duplicate: power\n",
            "Duplicate: only\n",
            "Duplicate: be\n",
            "Duplicate: isro\n",
            "Duplicate: director\n",
            "Duplicate: de\n",
            "Duplicate: fire\n",
            "Duplicate: man\n",
            "Duplicate: who\n",
            "Duplicate: most\n",
            "Duplicate: life\n",
            "Duplicate: post\n",
            "Duplicate: class\n",
            "Duplicate: show\n",
            "Duplicate: updates\n",
            "Duplicate: terror\n",
            "Duplicate: not\n",
            "Duplicate: season\n",
            "Duplicate: ca\n",
            "Duplicate: polls\n",
            "Duplicate: saddened\n",
            "Duplicate: also\n",
            "Duplicate: directed\n",
            "Duplicate: wedding\n",
            "Duplicate: khan\n",
            "Duplicate: family\n",
            "Duplicate: 4k\n",
            "Duplicate: open\n",
            "Duplicate: are\n",
            "Duplicate: ban\n",
            "Duplicate: online\n",
            "Duplicate: coming\n",
            "Duplicate: board\n",
            "Duplicate: minister\n",
            "Duplicate: super\n",
            "Duplicate: airport\n",
            "Duplicate: men\n",
            "Duplicate: crime\n",
            "Duplicate: condolences\n",
            "Duplicate: thanks\n",
            "Duplicate: priyanka\n",
            "Duplicate: wish\n",
            "Duplicate: view\n",
            "Duplicate: series\n",
            "Duplicate: ram\n",
            "Duplicate: girl\n",
            "Duplicate: station\n",
            "Duplicate: medical\n",
            "Duplicate: states\n",
            "Duplicate: public\n",
            "Duplicate: buy\n",
            "Duplicate: keep\n",
            "Duplicate: trust\n",
            "Duplicate: date\n",
            "Duplicate: dear\n",
            "Duplicate: big\n",
            "Duplicate: review\n",
            "Duplicate: anand\n",
            "Duplicate: about\n",
            "Duplicate: war\n",
            "Duplicate: indian\n",
            "Duplicate: car\n",
            "Duplicate: temple\n",
            "Duplicate: even\n",
            "Duplicate: mobile\n",
            "Duplicate: fake\n",
            "Duplicate: mission\n",
            "Duplicate: photos\n",
            "Duplicate: chief\n",
            "Duplicate: time\n",
            "Duplicate: written\n",
            "Duplicate: update\n",
            "Duplicate: congratulations\n",
            "Duplicate: yeh\n",
            "Duplicate: report\n",
            "Duplicate: law\n",
            "Duplicate: had\n",
            "Duplicate: inside\n",
            "Duplicate: launch\n",
            "Duplicate: award\n",
            "Duplicate: actor\n",
            "Duplicate: three\n",
            "Duplicate: rain\n",
            "Duplicate: war\n",
            "Duplicate: health\n",
            "Duplicate: operation\n",
            "Duplicate: xi\n",
            "Duplicate: who\n",
            "Duplicate: democracy\n",
            "Duplicate: jai\n",
            "Duplicate: playing\n",
            "Duplicate: seat\n",
            "Duplicate: don\n",
            "Duplicate: twitter\n",
            "Duplicate: officer\n",
            "Duplicate: politics\n",
            "Duplicate: ii\n",
            "Duplicate: like\n",
            "Duplicate: economic\n",
            "Duplicate: murder\n",
            "Duplicate: at\n",
            "Duplicate: its\n",
            "Duplicate: times\n",
            "Duplicate: met\n",
            "Duplicate: water\n",
            "Duplicate: is\n",
            "Duplicate: day\n",
            "Duplicate: kapil\n",
            "Duplicate: bank\n",
            "Duplicate: students\n",
            "Duplicate: singh\n",
            "Duplicate: major\n",
            "Duplicate: image\n",
            "Duplicate: collection\n",
            "Duplicate: should\n",
            "Duplicate: space\n",
            "Duplicate: head\n",
            "Duplicate: out\n",
            "Duplicate: ali\n",
            "Duplicate: main\n",
            "Duplicate: days\n",
            "Duplicate: ke\n",
            "Duplicate: service\n",
            "Duplicate: exam\n",
            "Duplicate: latest\n",
            "Duplicate: press\n",
            "Duplicate: sad\n",
            "Duplicate: every\n",
            "Duplicate: price\n",
            "Duplicate: was\n",
            "Duplicate: children\n",
            "Duplicate: fashion\n",
            "Duplicate: 5s\n",
            "Duplicate: anushkasharma\n",
            "Duplicate: accident\n",
            "Duplicate: services\n",
            "Duplicate: pics\n",
            "Duplicate: stay\n",
            "Duplicate: some\n",
            "Duplicate: boss\n",
            "Duplicate: crosses\n",
            "Duplicate: international\n",
            "Duplicate: release\n",
            "Duplicate: never\n",
            "Duplicate: sabha\n",
            "Duplicate: hit\n",
            "Duplicate: strike\n",
            "Duplicate: such\n",
            "Duplicate: recruitment\n",
            "Duplicate: education\n",
            "Duplicate: star\n",
            "Duplicate: account\n",
            "Duplicate: breaking\n",
            "Duplicate: mi.com\n",
            "Duplicate: app\n",
            "Duplicate: drive\n",
            "Duplicate: ho\n",
            "Duplicate: details\n",
            "Duplicate: both\n",
            "Duplicate: aap\n",
            "Duplicate: kabaddi\n",
            "Duplicate: god\n",
            "Duplicate: shah\n",
            "Duplicate: chowdhury\n",
            "Duplicate: scam\n",
            "Duplicate: viral\n",
            "Duplicate: next\n",
            "Duplicate: death\n",
            "Duplicate: force\n",
            "Duplicate: finally\n",
            "Duplicate: while\n",
            "Duplicate: rescue\n",
            "Duplicate: last\n",
            "Duplicate: or\n",
            "Duplicate: school\n",
            "Duplicate: aus\n",
            "Duplicate: card\n",
            "Duplicate: whatsapp\n",
            "Duplicate: check\n",
            "Duplicate: cr\n",
            "Duplicate: army\n",
            "Duplicate: extremely\n",
            "Duplicate: instagram\n",
            "Duplicate: mahindra\n",
            "Duplicate: senior\n",
            "Duplicate: budget\n",
            "Duplicate: courtesy\n",
            "Duplicate: well\n",
            "Duplicate: session\n",
            "Duplicate: ki\n",
            "Duplicate: youth\n",
            "Duplicate: part\n",
            "Duplicate: yes\n",
            "Duplicate: third\n",
            "Duplicate: cup\n",
            "Duplicate: really\n",
            "Duplicate: entertainment\n",
            "Duplicate: officers\n",
            "Duplicate: hum\n",
            "Duplicate: president\n",
            "Duplicate: due\n",
            "Duplicate: body\n",
            "Duplicate: india\n",
            "Duplicate: highest\n",
            "Duplicate: money\n",
            "Duplicate: sharma\n",
            "Duplicate: group\n",
            "Duplicate: does\n",
            "Duplicate: trailer\n",
            "Duplicate: these\n",
            "Duplicate: iii\n",
            "Duplicate: k\n",
            "Duplicate: request\n",
            "Duplicate: off\n",
            "Duplicate: being\n",
            "Duplicate: red\n",
            "Duplicate: voter\n",
            "Duplicate: weather\n",
            "Duplicate: wicket\n",
            "Duplicate: border\n",
            "Duplicate: united\n",
            "Duplicate: love\n",
            "Duplicate: pro\n",
            "Duplicate: big\n",
            "Duplicate: social\n",
            "Duplicate: sorry\n",
            "Duplicate: depth\n",
            "Duplicate: very\n",
            "Duplicate: desh\n",
            "Duplicate: note\n",
            "Duplicate: everything\n",
            "Duplicate: personal\n",
            "Duplicate: updated\n",
            "Duplicate: instagram\n",
            "Duplicate: loc\n",
            "Duplicate: raj\n",
            "Duplicate: peace\n",
            "Duplicate: jio\n",
            "Duplicate: ye\n",
            "Duplicate: ban\n",
            "Duplicate: of\n",
            "Duplicate: boy\n",
            "Duplicate: karva\n",
            "Duplicate: second\n",
            "Duplicate: hot\n",
            "Duplicate: tv\n",
            "Duplicate: her\n",
            "Duplicate: eyes\n",
            "Duplicate: salute\n",
            "Duplicate: blessed\n",
            "Duplicate: spoke\n",
            "Duplicate: prime\n",
            "Duplicate: modi\n",
            "Duplicate: stars\n",
            "Duplicate: voting\n",
            "Duplicate: aur\n",
            "Duplicate: freedom\n",
            "Duplicate: dream\n",
            "Duplicate: b\n",
            "Duplicate: link\n",
            "Duplicate: miss\n",
            "Duplicate: catch\n",
            "Duplicate: foreign\n",
            "Duplicate: then\n",
            "Duplicate: iphone\n",
            "Duplicate: sharing\n",
            "Duplicate: over\n",
            "Duplicate: moon\n",
            "Duplicate: general\n",
            "Duplicate: term\n",
            "Duplicate: vivoprokabaddi\n",
            "Duplicate: mbps\n",
            "Duplicate: oneplus\n",
            "Duplicate: zenfone\n",
            "Duplicate: sunnyleone\n",
            "Duplicate: respect\n",
            "Duplicate: metoo\n",
            "Duplicate: light\n",
            "Duplicate: download\n",
            "Duplicate: form\n",
            "Duplicate: minal\n",
            "Duplicate: before\n",
            "Duplicate: search\n",
            "Duplicate: pollution\n",
            "Duplicate: central\n",
            "Duplicate: their\n",
            "Duplicate: box\n",
            "Duplicate: credit\n",
            "Duplicate: the\n",
            "Duplicate: nation\n",
            "Duplicate: official\n",
            "Duplicate: staff\n",
            "Duplicate: gold\n",
            "Duplicate: opening\n",
            "Duplicate: action\n",
            "Duplicate: leader\n",
            "Duplicate: defence\n",
            "Duplicate: order\n",
            "Duplicate: scheme\n",
            "Duplicate: rest\n",
            "Duplicate: wishing\n",
            "Duplicate: tweet\n",
            "Duplicate: aadmi\n",
            "Duplicate: night\n",
            "Duplicate: once\n",
            "Duplicate: teachers\n",
            "Duplicate: mr\n",
            "Duplicate: encounter\n",
            "Duplicate: control\n",
            "Duplicate: hotel\n",
            "Duplicate: because\n",
            "Duplicate: aap\n",
            "Duplicate: bhai\n",
            "Duplicate: breaking\n",
            "Duplicate: relief\n",
            "Duplicate: moment\n",
            "Duplicate: find\n",
            "Duplicate: development\n",
            "Duplicate: am\n",
            "Duplicate: deeply\n",
            "Duplicate: under\n",
            "Duplicate: pok\n",
            "Duplicate: during\n",
            "Duplicate: and\n",
            "Duplicate: related\n",
            "Duplicate: since\n",
            "Duplicate: doctors\n",
            "Duplicate: aam\n",
            "Duplicate: heartfelt\n",
            "Duplicate: always\n",
            "Duplicate: songs\n",
            "Duplicate: set\n",
            "Duplicate: actress\n",
            "Duplicate: mon\n",
            "Duplicate: zee5\n",
            "Duplicate: disha\n",
            "Duplicate: kiss\n",
            "Duplicate: mom\n",
            "Duplicate: videos\n",
            "Duplicate: making\n",
            "Duplicate: o\n",
            "Duplicate: starplus\n",
            "Duplicate: negi\n",
            "Duplicate: amal\n",
            "Duplicate: region\n",
            "Duplicate: race\n",
            "Duplicate: summit\n",
            "Duplicate: black\n",
            "Duplicate: called\n",
            "max_size of vocab was specified as 50000; we now have 50000 words. Stopping reading.\n",
            "INFO:tensorflow:Building graph...\n",
            "INFO:tensorflow:batch_size 35, attn_size: 512, emb_size: 150\n",
            "INFO:tensorflow:Adding attention_decoder timestep 0 of 1\n",
            "INFO:tensorflow:Time to build graph: 2 seconds\n",
            "INFO:tensorflow:Loading checkpoint drive/My Drive/Hindi_News/logs_6_12/train/model.ckpt-6895\n",
            "INFO:tensorflow:Restoring parameters from drive/My Drive/Hindi_News/logs_6_12/train/model.ckpt-6895\n",
            "['पाकिस्तान', 'से', 'संगीन', 'रंगीन', 'रंगीन', 'घोटालों', 'में', 'उलझे', 'उलझे', 'हुए', 'हिना', 'बेटे', 'बिलावल', 'जरदारी', 'जरदारी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 1\n",
            "['हर', '5', 'बातों', 'का', 'हर', 'भाई', 'रखे', 'ख्याल,', 'तो', 'रिश्तों', 'में', '5', 'अक्टूबर', 'अक्टूबर', '2019']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 2\n",
            "['छत्तीसगढ़', 'में', 'इंसानियत', 'को', 'बचाने', 'वाली', 'भाभी', 'और', 'भतीजी', 'और', 'भतीजी', 'की', 'लाठी-डंडों', 'से', 'पीट-पीटकर']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 3\n",
            "['एनसीसी', 'की', 'गर्ल', 'कैडेट', 'जनरल', 'के', 'खिलाफ', 'भारतीय', 'सेना', 'मार्शल', 'रैंक', 'के', 'अधिकारी', 'के', 'खिलाफ']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 4\n",
            "['देश,', 'बिजनेस', 'और', 'बॉलीवुड', 'में', 'क्\\u200dया', 'हुआ?', 'जानने', 'के', 'लिए', 'यहां', 'के', 'साथ', 'लाइव', 'अपडेशन.']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 5\n",
            "['पुलिस', 'ने', 'बॉलीवुड', 'में', 'क्\\u200dया', 'हुआ?', 'जानने', 'के', 'लिए', 'यहां', 'पहुंचे', 'सीएम', 'योगी', 'का', 'स्वागत']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 6\n",
            "['रोड', 'शो', 'के', 'दौरान', 'रॉयल', 'बेंगलुरु', 'को', '4', 'विकेट', 'से', 'हराया', 'देश,', 'पीएम', 'मोदी', 'की']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 7\n",
            "['नोटबंदी', 'के', 'लिए', 'यहां', 'के', 'साथ', 'लाइव', 'अपडेशन.', 'एशियाडः', 'करोड़', 'में', '42', 'मरीजों', 'को', 'बचाया']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 8\n",
            "['भारत', 'ने', 'श्रीलंका', 'को', '7', 'विकेट', 'से', 'हराया', 'देश,', 'यहां', 'दिल्ली', 'में', 'कांग्रेस', 'की', 'इमरजेंसी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 9\n",
            "['11:23', 'और', 'जापानी', 'की', 'चेतावनी', 'की', 'गई', '11:23', 'PM', 'मोदी', 'ने', 'कहा-', 'अच्छी', 'दिशा', 'में']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 10\n",
            "['पाकिस्तान', 'ने', 'बॉलीवुड', 'में', 'क्\\u200dया', 'हुआ?', 'जानने', 'के', 'लिए', 'यहां', 'के', 'साथ', '15', 'करोड़', 'का']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 11\n",
            "['ट्रक', 'और', 'बॉलीवुड', 'में', 'क्\\u200dया', 'जानने', 'के', 'लिए', 'यहां', 'से', 'मिलेगा', '10:53', 'PM', 'मुफ्ती', 'से']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 12\n",
            "['सुप्रीम', 'कोर्ट', 'की', 'सरकार', 'ने', 'बॉलीवुड', 'में', 'क्\\u200dया', 'हुआ?', 'जानने', 'के', 'लिए', 'खबरों', 'का', 'मामला']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 13\n",
            "['माइकल', 'फैलन', 'के', 'आरोपों', 'के', 'बाद', 'माइकल', 'फैलन', 'ने', 'अपने', 'पद', 'से', 'इस्तीफा', 'दिया', 'इस्तीफा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 14\n",
            "['सईद', 'ने', 'भारत', 'में', 'कई', 'आतंकी', 'घटनाओं', 'का', 'साजिश', 'रचता', 'जानिए', 'भारत', 'का', 'मोस्ट', 'वॉन्टेड']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 15\n",
            "['ब्रिटेन', 'के', 'उप', 'प्रधानमंत्री', 'और', 'गंगा', 'के', 'सफाई', 'अभियान', 'में', 'प्रधानमंत्री', 'मोदी', 'से', 'मिले', 'गंगा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 16\n",
            "['चुनावी', 'नतीजों', 'से', 'पहले', 'लगातार', 'दूसरे', 'दिन', 'में', 'बहार', 'का', 'आलम', '23,897', 'सेंसेक्स', '23,839', 'सेंसेक्स']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 17\n",
            "['मोबाइल', 'यूजर्स', 'के', 'लिए', 'अगले', 'साल', 'के', 'लिए', 'बढ़ाने', 'का', 'फैसला', 'किया', 'जाएगा', 'रोमिंग', 'चार्ज']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 18\n",
            "['भारतीय', 'जियो', 'के', 'सस्ते', 'टैरिफ', 'को', 'मात', 'देने', 'के', 'लिए', 'है', 'तैयार.', 'जानिए', 'कंपनी', 'के']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 19\n",
            "['उत्तराखंड', 'में', 'सत्ता', 'की', 'दौड़', 'को', 'कांग्रेस', 'और', 'बीजेपी', 'ने', 'कहा', 'सरकार', 'बनाने', 'का', 'दावा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 20\n",
            "['आयकर', 'बढ़ाकर', 'कर', 'रही', 'संसद', 'की', 'एक', 'लाख', '5', 'लाख', 'पर', 'छूट', 'देने', 'का', 'भी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 21\n",
            "['राष्\\u200dट्रपति', 'ने', 'अभिभाषण', 'के', 'दौरान', 'राष्\\u200dट्रपति', 'प्रतिभा', 'पाटील', 'के', 'लिए', 'सरकार', 'ने', 'अच्\\u200dछे', 'विकासदर', 'को']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 22\n",
            "['बजट', 'आजतक', 'के', 'दूसरे', 'सेशन', 'में', 'नोटबंदी', 'के', 'लिए', 'नोटबंदी', 'से', 'पैदा', 'हुई', 'दिक्कत', 'दिक्कतों']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 23\n",
            "['बुलंदशहर', 'में', '3', 'दिसंबर', 'को', 'हुई', 'हिंसा', 'के', 'मामले', 'में', 'आरोपी', 'जीतू', 'को', 'पुलिस', 'ने']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 24\n",
            "['25', 'साल', 'बाद', 'भाकपा', 'माओवादी', 'का', 'चेहरा', 'बदल', 'सकता', 'है', 'बसवराजू?', 'बसवराजू', 'की', 'बैठक', 'में']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 25\n",
            "['अंडमान', 'में', 'बाहरी', 'दुनिया', 'के', 'लोगों', 'की', 'आमद', 'खुलने', 'की', 'जिद', 'गंवानी', 'नहीं', ':', 'एसपी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 26\n",
            "['रामायण', 'और', 'जानकी', 'मंदिर', 'जोड़े', 'जा', 'रहे', 'पीएम', 'मोदी', 'ने', 'रामकथा', 'सेवा', 'का', 'उद्घाटन', 'करते']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 27\n",
            "['नए', 'मंत्रियों', 'के', 'शपथ', 'ग्रहण', 'समारोह', 'में', 'CM', 'देवेंद्र', 'फड़नवीस', 'ने', '10', 'नए', 'मंत्रियों', 'को']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 28\n",
            "['लोकसभा', 'चुनाव', 'के', 'लिए', 'आम', 'आदमी', 'पार्टी', 'को', 'चंदा', 'जुटाने', 'का', 'प्लान', 'बनाया', 'नया', 'कैंपेन']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 29\n",
            "['दूरसंचार', 'मानदंडों', 'को', 'पूरा', 'नहीं', 'करेंगे,', 'उन', 'से', 'कम', '5', 'लाख', 'का', 'जुर्माना', 'लगाया', 'जुर्माना']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 30\n",
            "['एयरसेल', 'के', 'मालिक', 'राकेश', 'अस्थाना', 'के', 'बीच', 'लड़ी', 'लड़ाई', 'में', 'सभी', 'भागीदार', 'का', 'हाथ', 'है']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 31\n",
            "['जीएसटी', 'में', 'रजिस्ट्रेशन', 'रद्द', 'करने', 'के', 'लिए', 'जीएसटीएन', 'पोर्टल', 'पर', 'मिलेगा', 'नया', 'ऑप्शन', 'रजिस्ट्रेशन', 'होगा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 32\n",
            "['पुरुष', 'कभी', 'भी', 'महिलाओं', 'के', 'सिर्फ', 'महिलाओं', 'के', 'दोस्त', 'नहीं', 'महिलाओं', 'के', 'कारण', 'महिलाओं', 'के']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 33\n",
            "['कंगना', 'रनौत', 'फिल्म', 'फेस्टिवल', 'का', 'आगाज', 'होने', 'जा', 'रहा', 'ऐश्वर्या', 'राय,', 'सोनम', 'कपूर', 'और', 'हिना']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 34\n",
            "['अमित', 'शाह', 'ने', 'करतारपुर', 'कॉरिडोर', 'में', 'जिस', 'ब्रिज', 'का', 'निर्माण', 'के', 'लिए', 'भारत', 'पर', 'दबाव']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 35\n",
            "['महेंद्र', 'सिंह', 'और', 'विकेटकीपर', 'के', 'तौर', 'पर', 'सबसे', 'दुनिया', 'में', 'सबसे', 'अधिक', 'रन', 'बनाने', 'वाले']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 36\n",
            "['दिलबाग', 'सिंह', 'को', 'राज्य', 'की', 'जेलों', 'में', 'डीजीपी', 'के', 'पद', 'से', 'हटाया', 'रहा', 'हैं', 'दिलबाग']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 37\n",
            "['जिम', 'में', 'रहना', 'हर', 'किसी', 'के', 'लिए', 'हमारी', 'मानसिकता', 'बदल', 'रही', 'हैं', 'फिटनेस', 'ट्रेनर:', 'फिटनेस']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 38\n",
            "['बीजेपी', 'नेता', 'गिरिराज', 'सिंह', 'पर', 'देवघर', 'में', 'चुनावी', 'सभा', 'में', 'गिरिराज', 'सिंह', '18', 'अप्रैल', 'को']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 39\n",
            "['नीति', 'आयोग', 'के', 'सीईओ', 'ने', 'कहा', 'सरकार', 'डिजिटल', 'ट्रांजेक्शन', 'को', 'बढ़ावा', 'देने', 'के', 'लिए', 'कैशलेस']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 40\n",
            "['नोएडा', 'में', 'कैश', 'की', 'किल्लत', 'खत्म', 'होने', 'के', 'कारण', 'ज्यादातर', 'इलाकों', 'के', 'एटीएम', 'का', 'इंतजार']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 41\n",
            "['देश', 'में', 'जैसे-जैसे', 'कैशलेस', 'कैशलेस', 'अर्थव्यवस्था', 'की', 'संख्या', 'नए', 'उच्चस्तर', 'पर', 'साइबर', 'हमलों', 'का', 'प्रयास']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 42\n",
            "['आरबीआई', 'ने', 'बैंक', 'खातों', 'से', 'पैसे', 'निकालने', 'के', 'लिए', 'पैन', 'नंबर', '60', 'लाख', 'से', 'ज्यादा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 43\n",
            "['नरेंद्र', 'मोदी', 'की', 'लहर', 'से', 'उत्तर', 'प्रदेश', 'में', 'जातिवाद', 'के', 'घिनौने', 'स्वरूप', 'को', 'जबर्दस्त', 'चोट']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 44\n",
            "['आज', 'चुनाव', 'के', 'आखिरी', 'चरण', 'में', '41', 'सीटों', 'पर', 'डाले', 'जा', 'रहे', 'मताधिकार', 'का', 'इस्तेमाल']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 45\n",
            "['ऑस्कर', 'के', 'दौरान', '50', 'मामलों', 'में', 'ऑस्कर', 'की', 'भविष्यवाणी', 'कर', 'रहे', 'हैं', 'मृत्यु', 'का', 'अनुमान']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 46\n",
            "['बोफोर्स', 'मामले', 'में', 'कांग्रेस', 'पर', 'हमला', 'करने', 'के', 'लिए', 'सरकार', 'से', 'मंजूरी', 'देने', 'का', 'आग्रह']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 47\n",
            "['भारत', 'सरकार', 'ने', 'घाटी', 'में', 'जवानों', 'की', 'तैनाती', 'बढ़ाई', 'तैनाती', 'जा', 'रही', 'है', 'ऐतिहासिक', 'एलान']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 48\n",
            "['सीबीआई', 'विवाद', 'अभी', 'कांग्रेस', 'ने', 'प्रधानमंत्री', 'को', 'चिट्ठी', 'लिख', 'दिया', 'CVC', 'कांग्रेस', 'नेता', 'मल्लिकार्जुन', 'खड़गे']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 49\n",
            "['राजेश', 'सिंह', 'ने', 'एटीएस', 'के', 'अंतिम', 'संस्कार', 'को', 'अपर', 'पुलिस', 'के', 'पास', 'खुदकुशी', 'का', 'जानकारी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 50\n",
            "['एयरसेल', 'के', 'मालिक', 'भी', '2जी', 'घोटाले', 'में', 'उच्चतम', 'न्यायालय', 'की', 'निगाह', 'जांच', 'के', 'संदर्भ', 'में']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 51\n",
            "['प्रधानमंत्री', 'नरेंद्र', 'मोदी', 'के', 'निर्देश', 'पर', 'सीबीआई', 'देशभर', 'में', 'हो', 'रही', 'तलाशी', '25', 'से', 'ज्यादा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 52\n",
            "['पाकिस्तान', 'ने', 'भारत', 'से', '1965,', 'भारत', 'की', 'मजबूत', 'अर्थव्यवस्था', 'और', 'सुपीरियर', 'मिल्रिटी', 'के', 'सामने', 'टिक']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 53\n",
            "['ट्रिपल', 'तलाक', 'के', 'मामले', 'में', 'केंद्र', 'सरकार', 'ने', 'सुप्रीम', 'कोर्ट', 'में', 'ट्रिपल', 'तलाक', 'का', 'विरोध']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 54\n",
            "['नीतीश', 'ने', 'कहा', 'महंगाई', 'के', 'कारण', 'नहीं', 'उठाने', 'के', 'लिए', 'नहीं', 'कह', 'रही', 'है', 'महंगाई']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 55\n",
            "['हिंसा', 'में', 'हालात', 'को', 'काबू', 'करने', 'के', 'लिए', 'केंद्रीय', 'हालात', 'में', '39', 'लोगों', 'की', 'मौत']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 56\n",
            "['पहली', 'पारी', 'में', 'सचिन', 'ने', 'टेस्\\u200dट', 'करियर', 'का', '50वां', 'शतक', 'ली', 'पाटने', 'का', 'काम', 'किया']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 57\n",
            "['दिल्ली', 'में', '50', 'साल', 'पहले', 'चला', 'जाएगा', 'दिल्ली', 'सरकार', 'की', 'हालिया', 'बैठक', 'में', 'यह', 'फैसला']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 58\n",
            "['रेल', 'हादसे', 'के', 'बाद', 'कानपुर', 'रेल', 'हादसे', 'में', 'रेल', 'हादसों', 'की', 'साजिश', 'रचने', 'का', 'आदेश']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 59\n",
            "['इसरो', 'गगनयान', 'मिशन', 'पर', 'अब', 'इसरो', 'गगनयान', 'गगनयान', 'मिशन', 'गगनयान', 'मिशन', 'के', 'बाद', 'अब', 'इसरो']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 60\n",
            "['बस्तर', 'के', '159', 'आदिवासी', 'बल', 'पर', 'आसमान', 'में', 'भी', 'सुराख', 'हो', 'सकता', 'हिंसा', 'का', 'श्रेय']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 61\n",
            "['छत्तीसगढ़', 'में', 'कांग्रेस', 'का', 'चुनाव', 'सातवें', 'आसमान', 'पर', 'बनाने', 'का', 'निर्णय', 'साबित', 'हुआ', \"'जन\", 'फिर']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 62\n",
            "['एक', 'मई', 'से', 'भ्रष्\\u200dटाचार', 'के', 'खिलाफ', 'महाराष्ट्र', 'में', 'अन्ना', 'और', 'बाल', 'ठाकरे', 'की', 'मुलाकात', 'पर']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 63\n",
            "['फातिमा', 'लतीफ', 'मामले', 'में', '30', 'से', 'तमिलनाडु', 'युवा', 'कांग्रेस', 'के', 'लगभग', 'आईआईटी', 'कैंपस', 'के', 'बाहर']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 64\n",
            "['देवी', 'की', 'पूजा', 'शुरू', 'तिथि', 'को', 'नहाय-खाय', 'का', 'मुहूर्त', 'का', 'दिन', 'पड़', 'रही', 'हैं', 'छठ']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 65\n",
            "['छत्तीसगढ़', 'में', 'पहले', 'चरण', 'का', 'आखिरी', 'दिन', 'है', 'राम', 'मंदिर', 'का', 'भव्य', 'मंदिर', 'बन', 'रही']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 66\n",
            "['पुलिस', 'और', 'नक्सलियों', 'के', 'बीच', 'जबर्दस्त', 'मुठभेड़', 'के', 'साथ', 'मुठभेड़', 'के', 'दौरान', 'पुलिस', 'मुठभेड़', 'में']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 67\n",
            "['बिजली', 'वाले', 'घरों', 'में', 'बिजली', 'तिहार', 'रमन', 'सिंह', 'की', 'योजना', 'को', 'चुनाव', 'भुनाने', 'का', 'प्रयास']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 68\n",
            "['नक्सल', 'विरोधी', 'अभियान', 'बंद', 'कर', 'रहे', 'बस्तर', 'के', 'बस्तर', 'क्षेत्र', 'में', 'फिलहाल', 'फिलहाल', 'संकट', 'से']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 69\n",
            "['एनआईए', 'ने', 'पश्चिम', 'बंगाल', 'से', 'आतंकी', 'संगठन', 'के', 'एक', 'संदिग्ध', 'को', 'दिल्ली', 'लाया', 'दिल्ली', 'लाया']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 70\n",
            "['छत्तीसगढ़', 'में', '19', 'आदिवासियों', 'के', 'मारे', 'जाने', 'पर', 'सरकार', 'से', 'इतर', 'कांग्रेस', 'ने', 'कहा', 'दोषी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 71\n",
            "['छत्तीसगढ़', 'में', 'MBBS', 'की', 'पढ़ाई', 'कर', 'रहे', 'जूनियर', 'डॉक्टरों', 'को', 'हाईकोर्ट', 'ने', 'दिया', 'तगड़ा', 'झटका']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 72\n",
            "['नक्सलियों', 'ने', 'सुरक्षा', 'बलों', 'के', '26', 'जवान', 'शहीद', 'मारे', 'गए', '26', 'की', 'घात', 'लगाकर', 'घात']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 73\n",
            "['छत्तीसगढ़', 'के', 'नौ', 'डॉक्टरों', 'के', 'रजिस्ट्रेशन', 'निलंबित', 'कर', 'रहे', 'महिलाओं', 'की', 'बच्चेदानी', 'निकालने', 'का', 'आरोप']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 74\n",
            "['पुलिस', 'ब्लास्ट', 'में', 'सुकमा', 'के', 'खेरपाल', '3', 'लाख', 'के', '3', 'लाख', 'का', '3', 'जवान', 'घायल']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 75\n",
            "['हिंदुस्तान', 'में', 'जंगली', 'भैंसों', 'की', 'आबादी', 'वाले', 'जंगली', 'का', 'पहला', 'रायपुर', 'की', 'योजना', 'बना', 'रहे']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 76\n",
            "['चिदंबरम', 'ने', 'किसी', 'का', 'अपमान', 'दिया', 'बयान', 'तो', 'चिदंबरम', 'ने', 'की', 'महंगाई', 'की', 'बातों', 'का']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 77\n",
            "['पश्चिम', 'बंगाल', 'में', 'छह', 'महीनों', 'में', 'पार्टियों', 'के', 'बीच', 'हुई', 'झड़पों', 'को', 'चिंता', 'का', 'विषय']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 78\n",
            "['पीएम', 'मोदी', 'यात्रा', 'के', 'दौरान', 'आगे', 'बढ़ने', 'के', 'लिए', 'रोड', 'मैप', 'मैप', 'विकसित', 'करने', 'पर']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 79\n",
            "['प्रदूषण', 'की', 'समस्या', 'का', 'जा', 'रहा', 'दिल्ली', 'सरकार', 'के', 'ऊपर', 'नहीं', 'होना', ':', 'अशोक', 'गहलोत']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 80\n",
            "['राजस्थान', 'की', 'राजधानी', 'जयपुर', 'में', 'हनुमान', 'मंदिर', 'के', 'पीछे', 'नकली', 'नोट', 'छापने', 'का', 'कारखाना', 'पकड़ा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 81\n",
            "['हिमाचल', 'के', 'मुख्यमंत्री', 'का', 'फैक्टर', 'निर्णायक', 'साबित', 'हुआ', 'हिमाचल', '28', 'साल', 'में', 'फिर', 'संघ', 'का']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 82\n",
            "['सीएम', 'शिवराज', 'चौहान', 'ने', 'आदिवासियों', 'क', 'बीच', 'उनके', 'साथ', 'थिरक', 'कर', 'सबको', 'चौंका', 'दिया', 'था']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 83\n",
            "['हर', 'हर', 'साल', 'देश', 'में', '9,32,600', 'लोगों', 'की', 'वजह', 'से', 'होने', 'वाले', 'रोगों', 'का', 'बावजूद']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 84\n",
            "['चीन', 'के', 'झिनजियांग', 'क्षेत्र', 'में', 'एक', 'साल', 'पहले', 'तक', '181', 'गिरोहों', 'के', 'ठिकानों', 'पर', 'छापेमारी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 85\n",
            "['ट्रंप', 'ने', 'उत्तर', 'कोरिया', 'को', 'उत्तर', 'कोरिया', 'के', 'खिलाफ', 'उत्तर', 'कोरिया', 'भेजेगा.', 'चीन', 'का', 'करीबी']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 86\n",
            "['भारत-चीन', 'सीमा', 'को', 'सुलझाने', 'में', 'लग', 'रहे', 'गतिरोध', 'के', 'लिए', 'भारत', 'और', 'अविश्वास’', 'के', 'बीच']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 87\n",
            "['डोकलाम', 'और', 'भारत', 'डोकलाम', 'के', 'बीच', 'मिलकर', 'को', 'आगे', 'बढ़ाने', 'के', 'लिये', 'साथ', 'मिलकर', 'काम']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 88\n",
            "['भ्रष्टाचार', 'के', 'खिलाफ', 'चीन', 'में', 'भ्रष्टाचार', 'बदल', 'रहा', 'बड़ा', 'दुश्मन', 'सबसे', 'बड़ा', 'हुआ', 'बर्बाद', 'हो']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 89\n",
            "['चीन', 'ने', 'पीपुल्स', 'आर्मी', 'ने', 'लद्दाख', 'के', 'चुमार', 'गश्त', 'कर', 'रहे', 'चीन', 'में', 'बदलाव', 'नहीं']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 90\n",
            "['चीन', 'ने', 'भारत', 'के', 'एक', 'महात्वाकांक्षी', 'परियोजना', 'को', 'दिया', 'अटका', 'पीछ', 'डोकलाम', 'विवाद', 'का', 'पीछ']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 91\n",
            "['राजस्थान', 'में', 'हलाला', 'के', 'नाम', 'पर', 'महिला', 'के', 'साथ', 'बलात्कार', 'का', 'वीडियो', 'दिखाकर', 'बलात्कार', 'का']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 92\n",
            "['चीन', 'ने', 'इंडिया', 'टुडे', 'को', 'बताया', 'वीजा', 'का', 'इंतजार', 'किया', 'जा', 'रहा', 'श्रद्धालुओं', 'के', 'लिए']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 93\n",
            "['चीन', 'को', 'इस', 'बात', 'का', 'भी', 'नहीं', 'होगा', 'होगा', 'भूटान', 'की', 'क्षेत्रिय', 'संप्रभुता', 'की', 'प्रतिक्रिया']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 94\n",
            "['चीन', 'ने', 'इस्लामाबाद', 'में', 'भी', 'साजिश', 'करने', 'में', 'शामिल', 'हुए', 'आतंकवादियों', 'का', 'नया', 'राजदूत', 'नियुक्त']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 95\n",
            "['चीन', 'की', 'सेना', 'वास्तविक', 'नियंत्रण', 'रेखा', '(एलएसी)', 'पर', 'होटल', 'बनाने', 'की', 'योजना', 'का', 'विकास', 'नियंत्रण']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 96\n",
            "['पाकिस्तान', 'ने', 'पाकिस्तान', 'को', 'आज', 'संयुक्त', 'राष्ट्र', 'को', 'यूएन', 'में', 'करारा', 'जवाब', 'दिया', 'करारा', 'जवाब']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 97\n",
            "['चीन', 'की', 'एक', 'लोक\\u200dगीत', 'ने', 'भारत', 'आकर', 'शी', 'शी', 'जिनपिंग', 'की', 'पत्नी', 'को', 'जान-बूझकर', 'दूर']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 98\n",
            "['चीनी', 'शी', 'ने', 'उत्तर', 'कोरिया', 'के', 'नेता', 'किम', 'जोंग', 'को', 'भेजा', 'एक', 'साल', 'से', 'ज्यादा']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 99\n",
            "['डोकलाम', 'विवाद', 'में', 'रिश्तों', 'में', 'नरमी', 'आने', 'के', 'बाद', 'भारत', 'और', 'चीन', 'के', 'बीच', 'तनाव']\n",
            "file written\n",
            "INFO:tensorflow:sentence summarized 100\n",
            "INFO:tensorflow:Counter 100 stopped.\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "b0lmntKo1p1W",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Export the evaluation triples (article, reference, generated summary) to a\n",
        "# timestamped XML results file on Google Drive via the zaksum helper.\n",
        "zaksum(article, reference, summary, \"drive/My Drive/Hindi_News/result_Hindi_CL_Scheduled Sampling_7_12_2019_8_27Am.xml\")"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "cuaO5rbaLTw8",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "978205e8-1b29-4cfe-f164-cc490c13bdcd"
      },
      "source": [
        "# Sanity check: number of decoded articles (the decode loop above stopped at 100).\n",
        "len(article)"
      ],
      "execution_count": 22,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "100"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 22
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "BHHRJXCVIBB9",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}