{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from sklearn.utils import shuffle\n",
    "import re\n",
    "import time\n",
    "import collections\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def build_dataset(words, n_words, atleast=1):\n",
    "    count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]]\n",
    "    counter = collections.Counter(words).most_common(n_words)\n",
    "    counter = [i for i in counter if i[1] >= atleast]\n",
    "    count.extend(counter)\n",
    "    dictionary = dict()\n",
    "    for word, _ in count:\n",
    "        dictionary[word] = len(dictionary)\n",
    "    data = list()\n",
    "    unk_count = 0\n",
    "    for word in words:\n",
    "        index = dictionary.get(word, 0)\n",
    "        if index == 0:\n",
    "            unk_count += 1\n",
    "        data.append(index)\n",
    "    count[0][1] = unk_count\n",
    "    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n",
    "    return data, count, dictionary, reversed_dictionary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "len from: 500, len to: 500\n"
     ]
    }
   ],
   "source": [
    "def _read_lines(path):\n",
    "    # Load one side of the parallel corpus: lowercase the whole file and\n",
    "    # split into lines, dropping the empty string after the final newline.\n",
    "    with open(path, 'r') as fopen:\n",
    "        return fopen.read().lower().split('\\n')[:-1]\n",
    "\n",
    "text_from = _read_lines('english-train')\n",
    "text_to = _read_lines('vietnam-train')\n",
    "print('len from: %d, len to: %d'%(len(text_from), len(text_to)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "vocab from size: 1935\n",
      "Most common words [(',', 564), ('.', 477), ('the', 368), ('and', 286), ('to', 242), ('of', 220)]\n",
      "Sample data [482, 483, 78, 6, 137, 484, 10, 226, 787, 14] ['rachel', 'pike', ':', 'the', 'science', 'behind', 'a', 'climate', 'headline', 'in']\n"
     ]
    }
   ],
   "source": [
    "# Build the source-side vocabulary and inspect it\n",
    "concat_from = ' '.join(text_from).split()\n",
    "vocabulary_size_from = len(set(concat_from))\n",
    "data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)\n",
    "print('vocab from size: %d' % vocabulary_size_from)\n",
    "print('Most common words', count_from[4:10])\n",
    "print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "vocab to size: 1461\n",
      "Most common words [(',', 472), ('.', 430), ('tôi', 283), ('và', 230), ('có', 199), ('chúng', 196)]\n",
      "Sample data [84, 22, 668, 73, 10, 389, 110, 34, 81, 299] ['khoa', 'học', 'đằng', 'sau', 'một', 'tiêu', 'đề', 'về', 'khí', 'hậu']\n"
     ]
    }
   ],
   "source": [
    "# Build the target-side vocabulary and inspect it\n",
    "concat_to = ' '.join(text_to).split()\n",
    "vocabulary_size_to = len(set(concat_to))\n",
    "data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)\n",
    "print('vocab to size: %d' % vocabulary_size_to)\n",
    "print('Most common words', count_to[4:10])\n",
    "print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ids of the special tokens in the source vocabulary\n",
    "# (build_dataset reserves PAD=0, GO=1, EOS=2, UNK=3)\n",
    "GO, PAD, EOS, UNK = (dictionary_from[t] for t in ('GO', 'PAD', 'EOS', 'UNK'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Append an explicit end-of-sentence marker to every target sentence\n",
    "text_to = [sentence + ' EOS' for sentence in text_to]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def str_idx(corpus, dic):\n",
    "    X = []\n",
    "    for i in corpus:\n",
    "        ints = []\n",
    "        for k in i.split():\n",
    "            ints.append(dic.get(k,UNK))\n",
    "        X.append(ints)\n",
    "    return X\n",
    "\n",
    "def pad_sentence_batch(sentence_batch, pad_int, maxlen):\n",
    "    padded_seqs = []\n",
    "    seq_lens = []\n",
    "    max_sentence_len = maxlen\n",
    "    for sentence in sentence_batch:\n",
    "        padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))\n",
    "        seq_lens.append(maxlen)\n",
    "    return padded_seqs, seq_lens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Integer-encode both sides of the parallel corpus\n",
    "X = str_idx(text_from, dictionary_from)\n",
    "Y = str_idx(text_to, dictionary_to)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Double the longest observed sentence length to leave extra room\n",
    "# when padding batches\n",
    "maxlen_question = 2 * max(len(x) for x in X)\n",
    "maxlen_answer = 2 * max(len(y) for y in Y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optimization hyperparameters\n",
    "learning_rate = 1e-4\n",
    "batch_size = 16\n",
    "epoch = 20\n",
    "# Transformer-XL architecture hyperparameters\n",
    "n_layer = 3    # number of layers\n",
    "d_model = 256  # hidden size of the model stream\n",
    "d_embed = 256  # token embedding size\n",
    "n_head = 10    # number of attention heads\n",
    "d_head = 50    # width per head (attention output is projected back to d_model)\n",
    "d_inner = 512  # position-wise feed-forward inner width"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def positional_embedding(pos_seq, inv_freq, bsz = None):\n",
    "    # Sinusoidal positional embedding of shape [len, 1, d], tiled along\n",
    "    # axis 1 to [len, bsz, d] when a batch size is supplied.\n",
    "    sinusoid_inp = tf.einsum('i,j->ij', pos_seq, inv_freq)\n",
    "    pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)\n",
    "    pos_emb = pos_emb[:, None, :]\n",
    "    if bsz is not None:\n",
    "        pos_emb = tf.tile(pos_emb, [1, bsz, 1])\n",
    "    return pos_emb\n",
    "\n",
    "\n",
    "def positionwise_FF(inp, d_model, d_inner, kernel_initializer, scope = 'ff'):\n",
    "    # Position-wise feed-forward sublayer: dense(d_inner, relu) followed by\n",
    "    # dense(d_model), then a residual connection and layer normalization.\n",
    "    output = inp\n",
    "    with tf.variable_scope(scope):\n",
    "        output = tf.layers.dense(\n",
    "            inp,\n",
    "            d_inner,\n",
    "            activation = tf.nn.relu,\n",
    "            kernel_initializer = kernel_initializer,\n",
    "            name = 'layer_1',\n",
    "        )\n",
    "        output = tf.layers.dense(\n",
    "            output,\n",
    "            d_model,\n",
    "            kernel_initializer = kernel_initializer,\n",
    "            name = 'layer_2',\n",
    "        )\n",
    "        # residual + layer norm over the last axis\n",
    "        output = tf.contrib.layers.layer_norm(\n",
    "            output + inp, begin_norm_axis = -1\n",
    "        )\n",
    "    return output\n",
    "\n",
    "\n",
    "def rel_shift(x):\n",
    "    # Relative-shift trick (Transformer-XL): pad one step along axis 1,\n",
    "    # reshape with the first two axes swapped so the padding rotates the\n",
    "    # entries, drop the first row, and restore the original shape.  Used to\n",
    "    # align the position-based attention term (BD) in rel_multihead_attn.\n",
    "    x_size = tf.shape(x)\n",
    "\n",
    "    x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])\n",
    "    x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])\n",
    "    x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])\n",
    "    x = tf.reshape(x, x_size)\n",
    "\n",
    "    return x\n",
    "\n",
    "\n",
    "def rel_multihead_attn(\n",
    "    w,\n",
    "    r,\n",
    "    r_w_bias,\n",
    "    r_r_bias,\n",
    "    attn_mask,\n",
    "    mems,\n",
    "    d_model,\n",
    "    n_head,\n",
    "    d_head,\n",
    "    kernel_initializer,\n",
    "    scope = 'rel_attn',\n",
    "):\n",
    "    # Transformer-XL relative multi-head attention.  `w` is the input\n",
    "    # stream (time-major: [qlen, bsz, ...]), `r` the relative positional\n",
    "    # encodings, `r_w_bias`/`r_r_bias` the learned content/position biases.\n",
    "    # Previous-segment activations `mems` are prepended to the key/value\n",
    "    # stream; output gets a residual connection plus layer norm.\n",
    "    scale = 1 / (d_head ** 0.5)\n",
    "    with tf.variable_scope(scope):\n",
    "        qlen = tf.shape(w)[0]\n",
    "        rlen = tf.shape(r)[0]\n",
    "        bsz = tf.shape(w)[1]\n",
    "\n",
    "        cat = (\n",
    "            tf.concat([mems, w], 0)\n",
    "            if mems is not None and mems.shape.ndims > 1\n",
    "            else w\n",
    "        )\n",
    "        w_heads = tf.layers.dense(\n",
    "            cat,\n",
    "            3 * n_head * d_head,\n",
    "            use_bias = False,\n",
    "            kernel_initializer = kernel_initializer,\n",
    "            name = 'qkv',\n",
    "        )\n",
    "        r_head_k = tf.layers.dense(\n",
    "            r,\n",
    "            n_head * d_head,\n",
    "            use_bias = False,\n",
    "            kernel_initializer = kernel_initializer,\n",
    "            name = 'r',\n",
    "        )\n",
    "\n",
    "        w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, -1)\n",
    "        # queries come only from the current segment, not the memory\n",
    "        w_head_q = w_head_q[-qlen:]\n",
    "\n",
    "        klen = tf.shape(w_head_k)[0]\n",
    "\n",
    "        w_head_q = tf.reshape(w_head_q, [qlen, bsz, n_head, d_head])\n",
    "        w_head_k = tf.reshape(w_head_k, [klen, bsz, n_head, d_head])\n",
    "        w_head_v = tf.reshape(w_head_v, [klen, bsz, n_head, d_head])\n",
    "\n",
    "        r_head_k = tf.reshape(r_head_k, [rlen, n_head, d_head])\n",
    "\n",
    "        rw_head_q = w_head_q + r_w_bias\n",
    "        rr_head_q = w_head_q + r_r_bias\n",
    "\n",
    "        # AC: content-based scores; BD: position-based scores (rel-shifted)\n",
    "        AC = tf.einsum('ibnd,jbnd->ijbn', rw_head_q, w_head_k)\n",
    "        BD = tf.einsum('ibnd,jnd->ijbn', rr_head_q, r_head_k)\n",
    "        BD = rel_shift(BD)\n",
    "\n",
    "        attn_score = (AC + BD) * scale\n",
    "        attn_mask_t = attn_mask[:, :, None, None]\n",
    "        # push masked positions to a very large negative value pre-softmax\n",
    "        attn_score = attn_score * (1 - attn_mask_t) - 1e30 * attn_mask_t\n",
    "\n",
    "        attn_prob = tf.nn.softmax(attn_score, 1)\n",
    "        attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, w_head_v)\n",
    "        size_t = tf.shape(attn_vec)\n",
    "        attn_vec = tf.reshape(attn_vec, [size_t[0], size_t[1], n_head * d_head])\n",
    "\n",
    "        # project concatenated heads back to d_model\n",
    "        attn_out = tf.layers.dense(\n",
    "            attn_vec,\n",
    "            d_model,\n",
    "            use_bias = False,\n",
    "            kernel_initializer = kernel_initializer,\n",
    "            name = 'o',\n",
    "        )\n",
    "\n",
    "        output = tf.contrib.layers.layer_norm(\n",
    "            attn_out + w, begin_norm_axis = -1\n",
    "        )\n",
    "    return output\n",
    "\n",
    "\n",
    "def embedding_lookup(lookup_table, x):\n",
    "    # Thin wrapper around tf.nn.embedding_lookup.\n",
    "    return tf.nn.embedding_lookup(lookup_table, x)\n",
    "\n",
    "\n",
    "def mask_adaptive_embedding_lookup(\n",
    "    x,\n",
    "    n_token,\n",
    "    d_embed,\n",
    "    d_proj,\n",
    "    cutoffs,\n",
    "    initializer,\n",
    "    proj_initializer,\n",
    "    div_val = 1,\n",
    "    proj_same_dim = True,\n",
    "    scope = 'adaptive_embed',\n",
    "    **kwargs\n",
    "):\n",
    "    # Adaptive embedding lookup.  With div_val == 1 a single\n",
    "    # [n_token, d_embed] table is used (optionally projected to d_proj);\n",
    "    # otherwise tokens are bucketed by `cutoffs` and bucket i uses a table\n",
    "    # of width d_embed // div_val**i, with results scattered back into a\n",
    "    # [len, bsz, d_proj] output.  Returns (embeddings, [tables, projs]).\n",
    "    emb_scale = d_proj ** 0.5\n",
    "    with tf.variable_scope(scope):\n",
    "        if div_val == 1:\n",
    "            lookup_table = tf.get_variable(\n",
    "                'lookup_table', [n_token, d_embed], initializer = initializer\n",
    "            )\n",
    "            y = embedding_lookup(lookup_table, x)\n",
    "            if d_proj != d_embed:\n",
    "                proj_W = tf.get_variable(\n",
    "                    'proj_W', [d_embed, d_proj], initializer = proj_initializer\n",
    "                )\n",
    "                y = tf.einsum('ibe,ed->ibd', y, proj_W)\n",
    "            else:\n",
    "                proj_W = None\n",
    "            ret_params = [lookup_table, proj_W]\n",
    "        else:\n",
    "            tables, projs = [], []\n",
    "            cutoff_ends = [0] + cutoffs + [n_token]\n",
    "            x_size = tf.shape(x)\n",
    "            y = tf.zeros([x_size[0], x_size[1], d_proj])\n",
    "            for i in range(len(cutoff_ends) - 1):\n",
    "                with tf.variable_scope('cutoff_{}'.format(i)):\n",
    "                    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]\n",
    "                    # positions whose token id falls into this bucket\n",
    "                    mask = (x >= l_idx) & (x < r_idx)\n",
    "                    cur_x = tf.boolean_mask(x, mask) - l_idx\n",
    "                    cur_d_embed = d_embed // (div_val ** i)\n",
    "                    lookup_table = tf.get_variable(\n",
    "                        'lookup_table',\n",
    "                        [r_idx - l_idx, cur_d_embed],\n",
    "                        initializer = initializer,\n",
    "                    )\n",
    "                    cur_y = embedding_lookup(lookup_table, cur_x)\n",
    "                    if d_proj == cur_d_embed and not proj_same_dim:\n",
    "                        proj_W = None\n",
    "                    else:\n",
    "                        proj_W = tf.get_variable(\n",
    "                            'proj_W',\n",
    "                            [cur_d_embed, d_proj],\n",
    "                            initializer = proj_initializer,\n",
    "                        )\n",
    "                        cur_y = tf.einsum('id,de->ie', cur_y, proj_W)\n",
    "                    # scatter this bucket's embeddings back into position\n",
    "                    mask_idx = tf.to_int64(tf.where(mask))\n",
    "                    y += tf.scatter_nd(\n",
    "                        mask_idx, cur_y, tf.to_int64(tf.shape(y))\n",
    "                    )\n",
    "                    tables.append(lookup_table)\n",
    "                    projs.append(proj_W)\n",
    "            ret_params = [tables, projs]\n",
    "\n",
    "    # scale embeddings by sqrt(d_proj)\n",
    "    y *= emb_scale\n",
    "    return y, ret_params\n",
    "\n",
    "\n",
    "def mul_adaptive_embedding_lookup(\n",
    "    x,\n",
    "    n_token,\n",
    "    d_embed,\n",
    "    d_proj,\n",
    "    cutoffs,\n",
    "    initializer,\n",
    "    proj_initializer,\n",
    "    div_val = 1,\n",
    "    perms = None,\n",
    "    proj_same_dim = True,\n",
    "    scope = 'adaptive_embed',\n",
    "):\n",
    "    \"\"\"\n",
    "  perms: If None, first compute W = W1 x W2 (projection for each bin),\n",
    "      and then compute X x W (embedding lookup). If not None,\n",
    "      use bin-based embedding lookup with max_bin_size defined by\n",
    "      the shape of perms.\n",
    "  \"\"\"\n",
    "    # Adaptive embedding lookup variant that can use permutation tensors\n",
    "    # (`perms`) instead of boolean-mask scatters.  Returns\n",
    "    # (embeddings, [tables, projs]); embeddings are scaled by sqrt(d_proj).\n",
    "    emb_scale = d_proj ** 0.5\n",
    "    with tf.variable_scope(scope):\n",
    "        if div_val == 1:\n",
    "            lookup_table = tf.get_variable(\n",
    "                'lookup_table', [n_token, d_embed], initializer = initializer\n",
    "            )\n",
    "            y = embedding_lookup(lookup_table, x)\n",
    "            if d_proj != d_embed:\n",
    "                proj_W = tf.get_variable(\n",
    "                    'proj_W', [d_embed, d_proj], initializer = proj_initializer\n",
    "                )\n",
    "                y = tf.einsum('ibe,ed->ibd', y, proj_W)\n",
    "            else:\n",
    "                proj_W = None\n",
    "            ret_params = [lookup_table, proj_W]\n",
    "        else:\n",
    "            tables, projs = [], []\n",
    "            cutoff_ends = [0] + cutoffs + [n_token]\n",
    "            x_size = tf.shape(x)\n",
    "            if perms is None:\n",
    "                cat_lookup = []\n",
    "            else:\n",
    "                cat_lookup = tf.zeros([x_size[0], x_size[1], d_proj])\n",
    "            for i in range(len(cutoff_ends) - 1):\n",
    "                with tf.variable_scope('cutoff_{}'.format(i)):\n",
    "                    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]\n",
    "                    cur_d_embed = d_embed // (div_val ** i)\n",
    "                    lookup_table = tf.get_variable(\n",
    "                        'lookup_table',\n",
    "                        [r_idx - l_idx, cur_d_embed],\n",
    "                        initializer = initializer,\n",
    "                    )\n",
    "                    if cur_d_embed == d_proj and not proj_same_dim:\n",
    "                        proj_W = None\n",
    "                    else:\n",
    "                        proj_W = tf.get_variable(\n",
    "                            'proj_W',\n",
    "                            [cur_d_embed, d_proj],\n",
    "                            initializer = proj_initializer,\n",
    "                        )\n",
    "                    if perms is None:\n",
    "                        # pre-project each bin's table; concat + lookup later\n",
    "                        cat_lookup.append(\n",
    "                            tf.einsum('ie,ed->id', lookup_table, proj_W)\n",
    "                        )\n",
    "                    else:\n",
    "                        # speed up the computation of the first bin\n",
    "                        # also save some memory\n",
    "                        if i == 0:\n",
    "                            cur_y = embedding_lookup(\n",
    "                                lookup_table, tf.minimum(x, r_idx - 1)\n",
    "                            )\n",
    "                            if proj_W is not None:\n",
    "                                cur_y = tf.einsum('ibe,ed->ibd', cur_y, proj_W)\n",
    "                            cur_y *= perms[i][:, :, None]\n",
    "                            cat_lookup += cur_y\n",
    "                        else:\n",
    "                            # gather this bin's tokens via the permutation\n",
    "                            cur_x = tf.einsum(\n",
    "                                'ib,ibk->k', tf.to_float(x - l_idx), perms[i]\n",
    "                            )\n",
    "                            cur_x = tf.to_int32(cur_x)\n",
    "                            cur_y = embedding_lookup(lookup_table, cur_x)\n",
    "                            if proj_W is not None:\n",
    "                                cur_y = tf.einsum('ke,ed->kd', cur_y, proj_W)\n",
    "                            cat_lookup += tf.einsum(\n",
    "                                'kd,ibk->ibd', cur_y, perms[i]\n",
    "                            )\n",
    "                    tables.append(lookup_table)\n",
    "                    projs.append(proj_W)\n",
    "            if perms is None:\n",
    "                cat_lookup = tf.concat(cat_lookup, 0)\n",
    "                y = embedding_lookup(cat_lookup, x)\n",
    "            else:\n",
    "                y = cat_lookup\n",
    "            ret_params = [tables, projs]\n",
    "\n",
    "    y *= emb_scale\n",
    "    return y, ret_params\n",
    "\n",
    "\n",
    "def mask_adaptive_logsoftmax(\n",
    "    hidden,\n",
    "    target,\n",
    "    n_token,\n",
    "    d_embed,\n",
    "    d_proj,\n",
    "    cutoffs,\n",
    "    params,\n",
    "    tie_projs,\n",
    "    initializer = None,\n",
    "    proj_initializer = None,\n",
    "    div_val = 1,\n",
    "    scope = 'adaptive_softmax',\n",
    "    proj_same_dim = True,\n",
    "    return_mean = True,\n",
    "    **kwargs\n",
    "):\n",
    "    # Adaptive-softmax negative log-likelihood.  With no cutoffs this is a\n",
    "    # plain sparse softmax cross-entropy.  Otherwise bucket 0 is handled by\n",
    "    # a 'head' softmax that also carries one cluster logit per tail bucket,\n",
    "    # and a tail token's log-prob is its cluster log-prob plus its log-prob\n",
    "    # under that bucket's own softmax.  `params` is [weights, projections]\n",
    "    # as returned by the embedding lookup (for weight sharing).\n",
    "    def _logit(x, W, b, proj):\n",
    "        # optionally project x, then compute logits against W and bias b\n",
    "        y = x\n",
    "        if proj is not None:\n",
    "            y = tf.einsum('ibd,ed->ibe', y, proj)\n",
    "        return tf.einsum('ibd,nd->ibn', y, W) + b\n",
    "\n",
    "    params_W, params_projs = params[0], params[1]\n",
    "\n",
    "    def _gather_logprob(logprob, target):\n",
    "        # pick logprob[r, target[r]] for every row r\n",
    "        lp_size = tf.shape(logprob)\n",
    "        r = tf.range(lp_size[0])\n",
    "        idx = tf.stack([r, target], 1)\n",
    "        return tf.gather_nd(logprob, idx)\n",
    "\n",
    "    with tf.variable_scope(scope):\n",
    "        if len(cutoffs) == 0:\n",
    "            softmax_b = tf.get_variable(\n",
    "                'bias', [n_token], initializer = tf.zeros_initializer()\n",
    "            )\n",
    "            output = _logit(hidden, params_W, softmax_b, params_projs)\n",
    "            nll = tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
    "                labels = target, logits = output\n",
    "            )\n",
    "        else:\n",
    "            cutoff_ends = [0] + cutoffs + [n_token]\n",
    "            nll = tf.zeros_like(target, dtype = tf.float32)\n",
    "            for i in range(len(cutoff_ends) - 1):\n",
    "                with tf.variable_scope('cutoff_{}'.format(i)):\n",
    "                    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]\n",
    "                    # targets that fall into this bucket\n",
    "                    mask = (target >= l_idx) & (target < r_idx)\n",
    "                    mask_idx = tf.where(mask)\n",
    "                    cur_target = tf.boolean_mask(target, mask) - l_idx\n",
    "                    cur_d_embed = d_embed // (div_val ** i)\n",
    "\n",
    "                    if div_val == 1:\n",
    "                        cur_W = params_W[l_idx:r_idx]\n",
    "                    else:\n",
    "                        cur_W = params_W[i]\n",
    "                    cur_b = tf.get_variable(\n",
    "                        'b',\n",
    "                        [r_idx - l_idx],\n",
    "                        initializer = tf.zeros_initializer(),\n",
    "                    )\n",
    "                    if tie_projs[i]:\n",
    "                        # share the embedding projection with the softmax\n",
    "                        if div_val == 1:\n",
    "                            cur_proj = params_projs\n",
    "                        else:\n",
    "                            cur_proj = params_projs[i]\n",
    "                    else:\n",
    "                        if (\n",
    "                            div_val == 1 or not proj_same_dim\n",
    "                        ) and d_proj == cur_d_embed:\n",
    "                            cur_proj = None\n",
    "                        else:\n",
    "                            cur_proj = tf.get_variable(\n",
    "                                'proj',\n",
    "                                [cur_d_embed, d_proj],\n",
    "                                initializer = proj_initializer,\n",
    "                            )\n",
    "                    if i == 0:\n",
    "                        # head softmax: bucket-0 tokens plus one cluster\n",
    "                        # logit per tail bucket\n",
    "                        cluster_W = tf.get_variable(\n",
    "                            'cluster_W',\n",
    "                            [len(cutoffs), d_embed],\n",
    "                            initializer = tf.zeros_initializer(),\n",
    "                        )\n",
    "                        cluster_b = tf.get_variable(\n",
    "                            'cluster_b',\n",
    "                            [len(cutoffs)],\n",
    "                            initializer = tf.zeros_initializer(),\n",
    "                        )\n",
    "                        cur_W = tf.concat([cur_W, cluster_W], 0)\n",
    "                        cur_b = tf.concat([cur_b, cluster_b], 0)\n",
    "\n",
    "                        head_logit = _logit(hidden, cur_W, cur_b, cur_proj)\n",
    "                        head_logprob = tf.nn.log_softmax(head_logit)\n",
    "                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)\n",
    "                        cur_logprob = _gather_logprob(\n",
    "                            cur_head_logprob, cur_target\n",
    "                        )\n",
    "                    else:\n",
    "                        # tail bucket: cluster log-prob (head_logprob kept\n",
    "                        # from the i == 0 iteration) + within-bucket log-prob\n",
    "                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)\n",
    "                        cur_hidden = tf.boolean_mask(hidden, mask)\n",
    "                        tail_logit = tf.squeeze(\n",
    "                            _logit(cur_hidden[None], cur_W, cur_b, cur_proj), 0\n",
    "                        )\n",
    "                        tail_logprob = tf.nn.log_softmax(tail_logit)\n",
    "                        cur_logprob = cur_head_logprob[\n",
    "                            :, cutoff_ends[1] + i - 1\n",
    "                        ] + _gather_logprob(tail_logprob, cur_target)\n",
    "                    nll += tf.scatter_nd(\n",
    "                        mask_idx, -cur_logprob, tf.to_int64(tf.shape(nll))\n",
    "                    )\n",
    "    if return_mean:\n",
    "        nll = tf.reduce_mean(nll)\n",
    "    return nll\n",
    "\n",
    "\n",
    "def mul_adaptive_logsoftmax(\n",
    "    hidden,\n",
    "    target,\n",
    "    n_token,\n",
    "    d_embed,\n",
    "    d_proj,\n",
    "    cutoffs,\n",
    "    params,\n",
    "    tie_projs,\n",
    "    initializer = None,\n",
    "    proj_initializer = None,\n",
    "    div_val = 1,\n",
    "    perms = None,\n",
    "    proj_same_dim = True,\n",
    "    scope = 'adaptive_softmax',\n",
    "    **kwargs\n",
    "):\n",
    "    # Adaptive-softmax NLL using permutation tensors (`perms`) to route\n",
    "    # positions to buckets; returns the loss averaged over counted\n",
    "    # positions.  When cutoffs are used, expects kwargs['head_target'].\n",
    "    def _logit(x, W, b, proj):\n",
    "        # optionally project x, then compute logits; handles both\n",
    "        # 3-D [len, bsz, d] and flat [k, d] inputs\n",
    "        y = x\n",
    "        if x.shape.ndims == 3:\n",
    "            if proj is not None:\n",
    "                y = tf.einsum('ibd,ed->ibe', y, proj)\n",
    "            return tf.einsum('ibd,nd->ibn', y, W) + b\n",
    "        else:\n",
    "            if proj is not None:\n",
    "                y = tf.einsum('id,ed->ie', y, proj)\n",
    "            return tf.einsum('id,nd->in', y, W) + b\n",
    "\n",
    "    params_W, params_projs = params[0], params[1]\n",
    "\n",
    "    with tf.variable_scope(scope):\n",
    "        if len(cutoffs) == 0:\n",
    "            softmax_b = tf.get_variable(\n",
    "                'bias', [n_token], initializer = tf.zeros_initializer()\n",
    "            )\n",
    "            output = _logit(hidden, params_W, softmax_b, params_projs)\n",
    "            nll = tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
    "                labels = target, logits = output\n",
    "            )\n",
    "            nll = tf.reduce_mean(nll)\n",
    "        else:\n",
    "            total_loss, total_cnt = 0, 0\n",
    "            cutoff_ends = [0] + cutoffs + [n_token]\n",
    "            for i in range(len(cutoff_ends) - 1):\n",
    "                with tf.variable_scope('cutoff_{}'.format(i)):\n",
    "                    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]\n",
    "\n",
    "                    cur_d_embed = d_embed // (div_val ** i)\n",
    "\n",
    "                    if div_val == 1:\n",
    "                        cur_W = params_W[l_idx:r_idx]\n",
    "                    else:\n",
    "                        cur_W = params_W[i]\n",
    "                    cur_b = tf.get_variable(\n",
    "                        'b',\n",
    "                        [r_idx - l_idx],\n",
    "                        initializer = tf.zeros_initializer(),\n",
    "                    )\n",
    "                    if tie_projs[i]:\n",
    "                        # share the embedding projection with the softmax\n",
    "                        if div_val == 1:\n",
    "                            cur_proj = params_projs\n",
    "                        else:\n",
    "                            cur_proj = params_projs[i]\n",
    "                    else:\n",
    "                        if (\n",
    "                            div_val == 1 or not proj_same_dim\n",
    "                        ) and d_proj == cur_d_embed:\n",
    "                            cur_proj = None\n",
    "                        else:\n",
    "                            cur_proj = tf.get_variable(\n",
    "                                'proj',\n",
    "                                [cur_d_embed, d_proj],\n",
    "                                initializer = proj_initializer,\n",
    "                            )\n",
    "\n",
    "                    if i == 0:\n",
    "                        # head softmax: bucket-0 tokens plus one cluster\n",
    "                        # logit per tail bucket\n",
    "                        cluster_W = tf.get_variable(\n",
    "                            'cluster_W',\n",
    "                            [len(cutoffs), d_embed],\n",
    "                            initializer = tf.zeros_initializer(),\n",
    "                        )\n",
    "                        cluster_b = tf.get_variable(\n",
    "                            'cluster_b',\n",
    "                            [len(cutoffs)],\n",
    "                            initializer = tf.zeros_initializer(),\n",
    "                        )\n",
    "                        cur_W = tf.concat([cur_W, cluster_W], 0)\n",
    "                        cur_b = tf.concat([cur_b, cluster_b], 0)\n",
    "\n",
    "                        head_logit = _logit(hidden, cur_W, cur_b, cur_proj)\n",
    "\n",
    "                        head_target = kwargs.get('head_target')\n",
    "                        head_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
    "                            labels = head_target, logits = head_logit\n",
    "                        )\n",
    "\n",
    "                        masked_loss = head_nll * perms[i]\n",
    "                        total_loss += tf.reduce_sum(masked_loss)\n",
    "                        total_cnt += tf.reduce_sum(perms[i])\n",
    "\n",
    "                        # head_logprob = tf.nn.log_softmax(head_logit)\n",
    "\n",
    "                        # final_logprob = head_logprob * perms[i][:, :, None]\n",
    "                        # final_target = tf.one_hot(target, tf.shape(head_logprob)[2])\n",
    "                        # total_loss -= tf.einsum('ibn,ibn->', final_logprob, final_target)\n",
    "                        # total_cnt += tf.reduce_sum(perms[i])\n",
    "                    else:\n",
    "                        # tail bucket: reuse head_nll computed in the i == 0\n",
    "                        # iteration, gathered through this bucket's perm\n",
    "                        cur_head_nll = tf.einsum(\n",
    "                            'ib,ibk->k', head_nll, perms[i]\n",
    "                        )\n",
    "\n",
    "                        cur_hidden = tf.einsum('ibd,ibk->kd', hidden, perms[i])\n",
    "                        tail_logit = _logit(cur_hidden, cur_W, cur_b, cur_proj)\n",
    "\n",
    "                        tail_target = tf.einsum(\n",
    "                            'ib,ibk->k', tf.to_float(target - l_idx), perms[i]\n",
    "                        )\n",
    "                        tail_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
    "                            labels = tf.to_int32(tail_target),\n",
    "                            logits = tail_logit,\n",
    "                        )\n",
    "\n",
    "                        sum_nll = cur_head_nll + tail_nll\n",
    "                        mask = tf.reduce_sum(perms[i], [0, 1])\n",
    "\n",
    "                        masked_loss = sum_nll * mask\n",
    "                        total_loss += tf.reduce_sum(masked_loss)\n",
    "                        total_cnt += tf.reduce_sum(mask)\n",
    "\n",
    "            nll = total_loss / total_cnt\n",
    "\n",
    "    return nll\n",
    "\n",
    "\n",
    "def _create_mask(qlen, mlen, same_length = False):\n",
    "    # Causal attention mask of shape [qlen, mlen + qlen]; 1 marks positions\n",
    "    # that must NOT be attended.  The current segment gets a strict upper\n",
    "    # triangle; the memory columns are all zeros (fully visible).  With\n",
    "    # same_length, part of the lower triangle is also masked so each query\n",
    "    # attends over a fixed-width window (Transformer-XL same_length mode).\n",
    "    attn_mask = tf.ones([qlen, qlen])\n",
    "    mask_u = tf.matrix_band_part(attn_mask, 0, -1)\n",
    "    mask_dia = tf.matrix_band_part(attn_mask, 0, 0)\n",
    "    attn_mask_pad = tf.zeros([qlen, mlen])\n",
    "    ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)\n",
    "    if same_length:\n",
    "        mask_l = tf.matrix_band_part(attn_mask, -1, 0)\n",
    "        ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)\n",
    "    return ret\n",
    "\n",
    "\n",
    "def _cache_mem(curr_out, prev_mem, mem_len = None):\n",
    "    # Update the segment-level memory: keep the last `mem_len` steps of\n",
    "    # [prev_mem; curr_out] (or just curr_out when no memory is configured;\n",
    "    # mem_len == 0 leaves the previous memory untouched).  Gradients are\n",
    "    # stopped so the cached memory is treated as constant.\n",
    "    if mem_len is None or prev_mem is None:\n",
    "        new_mem = curr_out\n",
    "    elif mem_len == 0:\n",
    "        return prev_mem\n",
    "    else:\n",
    "        new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:]\n",
    "\n",
    "    return tf.stop_gradient(new_mem)\n",
    "\n",
    "\n",
    "def transformer(\n",
    "    dec_inp,\n",
    "    mems,\n",
    "    n_token,\n",
    "    n_layer,\n",
    "    d_model,\n",
    "    d_embed,\n",
    "    n_head,\n",
    "    d_head,\n",
    "    d_inner,\n",
    "    initializer,\n",
    "    proj_initializer = None,\n",
    "    mem_len = None,\n",
    "    cutoffs = [],\n",
    "    div_val = 1,\n",
    "    tie_projs = [],\n",
    "    same_length = False,\n",
    "    clamp_len = -1,\n",
    "    untie_r = False,\n",
    "    proj_same_dim = True,\n",
    "    scope = 'transformer',\n",
    "    reuse = tf.AUTO_REUSE\n",
    "):\n",
    "    \"\"\"Transformer-XL style stack: relative positional attention plus\n",
    "    per-layer cached memories.\n",
    "\n",
    "    dec_inp: int token ids; time is the leading axis (qlen = shape[0]).\n",
    "    mems: per-layer memories from the previous segment (indexable by\n",
    "        layer along axis 0), or None to start without memory.\n",
    "    n_token: vocabulary size for the adaptive embedding lookup.\n",
    "    cutoffs: a list of python int. Cutoffs for adaptive softmax.\n",
    "    tie_projs: a list of python bools. Whether to tie the projections.\n",
    "    clamp_len: if > 0, relative positions are clamped to this value.\n",
    "    untie_r: if True, every layer owns its own r_w / r_r biases.\n",
    "    Returns (output, new_mems): final-layer activations and the updated\n",
    "    per-layer memory list.\n",
    "    \"\"\"\n",
    "    new_mems = []\n",
    "    with tf.variable_scope(scope,reuse=reuse):\n",
    "        # content (r_w) and position (r_r) attention biases, shared\n",
    "        # across layers unless untie_r\n",
    "        if untie_r:\n",
    "            r_w_bias = tf.get_variable(\n",
    "                'r_w_bias', [n_layer, n_head, d_head], initializer = initializer\n",
    "            )\n",
    "            r_r_bias = tf.get_variable(\n",
    "                'r_r_bias', [n_layer, n_head, d_head], initializer = initializer\n",
    "            )\n",
    "        else:\n",
    "            r_w_bias = tf.get_variable(\n",
    "                'r_w_bias', [n_head, d_head], initializer = initializer\n",
    "            )\n",
    "            r_r_bias = tf.get_variable(\n",
    "                'r_r_bias', [n_head, d_head], initializer = initializer\n",
    "            )\n",
    "\n",
    "        # query, memory and total key lengths for this segment\n",
    "        qlen = tf.shape(dec_inp)[0]\n",
    "        mlen = tf.shape(mems[0])[0] if mems is not None else 0\n",
    "        klen = mlen + qlen\n",
    "\n",
    "        if proj_initializer is None:\n",
    "            proj_initializer = initializer\n",
    "        lookup_fn = mask_adaptive_embedding_lookup\n",
    "        embeddings, shared_params = lookup_fn(\n",
    "            x = dec_inp,\n",
    "            n_token = n_token,\n",
    "            d_embed = d_embed,\n",
    "            d_proj = d_model,\n",
    "            cutoffs = cutoffs,\n",
    "            initializer = initializer,\n",
    "            proj_initializer = proj_initializer,\n",
    "            div_val = div_val,\n",
    "            proj_same_dim = proj_same_dim,\n",
    "        )\n",
    "\n",
    "        # causal mask over [qlen, klen] (memory columns stay visible)\n",
    "        attn_mask = _create_mask(qlen, mlen, same_length)\n",
    "\n",
    "        # relative positions count down from klen - 1 to 0\n",
    "        pos_seq = tf.range(klen - 1, -1, -1.0)\n",
    "        if clamp_len > 0:\n",
    "            pos_seq = tf.minimum(pos_seq, clamp_len)\n",
    "        # sinusoidal inverse frequencies for the positional embedding\n",
    "        inv_freq = 1 / (10000 ** (tf.range(0, d_model, 2.0) / d_model))\n",
    "        pos_emb = positional_embedding(pos_seq, inv_freq)\n",
    "\n",
    "        if mems is None:\n",
    "            mems = [None] * n_layer\n",
    "        output = embeddings\n",
    "        for i in range(n_layer):\n",
    "            # cache new mems\n",
    "            new_mems.append(_cache_mem(output, mems[i], mem_len))\n",
    "\n",
    "            with tf.variable_scope('layer_{}'.format(i)):\n",
    "                output = rel_multihead_attn(\n",
    "                    w = output,\n",
    "                    r = pos_emb,\n",
    "                    r_w_bias = r_w_bias if not untie_r else r_w_bias[i],\n",
    "                    r_r_bias = r_r_bias if not untie_r else r_r_bias[i],\n",
    "                    attn_mask = attn_mask,\n",
    "                    mems = mems[i],\n",
    "                    d_model = d_model,\n",
    "                    n_head = n_head,\n",
    "                    d_head = d_head,\n",
    "                    kernel_initializer = initializer,\n",
    "                )\n",
    "                output = positionwise_FF(\n",
    "                    inp = output,\n",
    "                    d_model = d_model,\n",
    "                    d_inner = d_inner,\n",
    "                    kernel_initializer = initializer,\n",
    "                )\n",
    "\n",
    "        return output, new_mems"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Chatbot:\n",
    "    \"\"\"Seq2seq translation graph built from Transformer-XL blocks.\n",
    "\n",
    "    Relies on module-level globals defined in earlier cells: GO, PAD,\n",
    "    n_layer, d_model, d_embed, n_head, d_head, d_inner, dictionary_from,\n",
    "    dictionary_to, maxlen_answer, learning_rate and transformer().\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # padded token-id batches, shape [batch, time]\n",
    "        self.X = tf.placeholder(tf.int32, [None, None])\n",
    "        self.Y = tf.placeholder(tf.int32, [None, None])\n",
    "\n",
    "        # true sequence lengths; valid because PAD maps to index 0\n",
    "        self.X_seq_len = tf.count_nonzero(self.X, 1, dtype = tf.int32)\n",
    "        self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype = tf.int32)\n",
    "        batch_size = tf.shape(self.X)[0]\n",
    "        # teacher-forcing input: drop the last target token, prepend GO\n",
    "        main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])\n",
    "        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)\n",
    "        initializer = tf.initializers.random_normal(stddev = 0.1)\n",
    "\n",
    "        def forward(x, y, reuse = tf.AUTO_REUSE):\n",
    "            \"\"\"Encoder -> decoder -> decoder_1, then project to vocab logits.\"\"\"\n",
    "            memory = tf.fill(\n",
    "                [n_layer, tf.shape(x)[0], tf.shape(x)[1], d_model], PAD\n",
    "            )\n",
    "            memory = tf.cast(memory, tf.float32)\n",
    "            # encoder pass: only the produced memories are used downstream\n",
    "            _, next_memory = transformer(\n",
    "                x,\n",
    "                memory,\n",
    "                len(dictionary_from),\n",
    "                n_layer,\n",
    "                d_model,\n",
    "                d_embed,\n",
    "                n_head,\n",
    "                d_head,\n",
    "                d_inner,\n",
    "                initializer,\n",
    "                scope='encoder',\n",
    "                reuse=reuse\n",
    "            )\n",
    "            # NOTE(review): this pass feeds source ids x with the target\n",
    "            # vocab size — verify the embedding lookup tolerates that.\n",
    "            _, next_memory = transformer(\n",
    "                x,\n",
    "                next_memory,\n",
    "                len(dictionary_to),\n",
    "                n_layer,\n",
    "                d_model,\n",
    "                d_embed,\n",
    "                n_head,\n",
    "                d_head,\n",
    "                d_inner,\n",
    "                initializer,\n",
    "                scope='decoder',\n",
    "                reuse=reuse\n",
    "            )\n",
    "            logits = transformer(\n",
    "                y,\n",
    "                next_memory,\n",
    "                len(dictionary_to),\n",
    "                n_layer,\n",
    "                d_model,\n",
    "                d_embed,\n",
    "                n_head,\n",
    "                d_head,\n",
    "                d_inner,\n",
    "                initializer,\n",
    "                scope='decoder_1',\n",
    "                reuse=reuse\n",
    "            )[0]\n",
    "            # bug fix: project onto the TARGET vocabulary; the original used\n",
    "            # len(dictionary_from), which mismatches self.Y's indices and\n",
    "            # the rev_dictionary_to used to decode predictions\n",
    "            return tf.layers.dense(logits, len(dictionary_to), reuse=tf.AUTO_REUSE)\n",
    "\n",
    "        self.training_logits = forward(self.X, decoder_input)\n",
    "\n",
    "        # greedy decoding: generate one position per while-loop iteration\n",
    "        def cond(i, y, temp):\n",
    "            return i < tf.reduce_max(tf.shape(self.X)[1])\n",
    "\n",
    "        def body(i, y, temp):\n",
    "            logits = forward(self.X, y, reuse = True)\n",
    "            ids = tf.argmax(logits, -1)[:, i]\n",
    "            ids = tf.expand_dims(ids, -1)\n",
    "            temp = tf.concat([temp[:, 1:], ids], -1)\n",
    "            # rotate so the newest ids land at the position being decoded\n",
    "            y = tf.concat([temp[:, -(i + 1):], temp[:, :-(i + 1)]], -1)\n",
    "            y = tf.reshape(y, [tf.shape(temp)[0], tf.shape(self.X)[1]])\n",
    "            i += 1\n",
    "            return i, y, temp\n",
    "\n",
    "        # decoding starts from an all-GO canvas shaped like the input\n",
    "        target = tf.fill([batch_size, tf.shape(self.X)[1]], GO)\n",
    "        target = tf.cast(target, tf.int64)\n",
    "        self.target = target\n",
    "\n",
    "        _, self.predicting_ids, _ = tf.while_loop(\n",
    "            cond, body, [tf.constant(0), target, target]\n",
    "        )\n",
    "\n",
    "        # loss/metrics are masked to the real (non-PAD) target positions\n",
    "        masks = tf.sequence_mask(self.Y_seq_len, maxlen_answer, dtype = tf.float32)\n",
    "        self.cost = tf.contrib.seq2seq.sequence_loss(\n",
    "            logits = self.training_logits,\n",
    "            targets = self.Y,\n",
    "            weights = masks,\n",
    "        )\n",
    "        self.optimizer = tf.train.AdamOptimizer(\n",
    "            learning_rate = learning_rate\n",
    "        ).minimize(self.cost)\n",
    "        y_t = tf.argmax(self.training_logits, axis = 2)\n",
    "        y_t = tf.cast(y_t, tf.int32)\n",
    "        self.prediction = tf.boolean_mask(y_t, masks)\n",
    "        mask_label = tf.boolean_mask(self.Y, masks)\n",
    "        correct_pred = tf.equal(self.prediction, mask_label)\n",
    "        # (removed the unused `correct_index` intermediate)\n",
    "        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# fresh graph + interactive session; build the model and init variables\n",
    "tf.reset_default_graph()\n",
    "sess = tf.InteractiveSession()\n",
    "model = Chatbot()\n",
    "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 1, avg loss: 7.368046, avg accuracy: 0.030184\n",
      "epoch: 2, avg loss: 6.681681, avg accuracy: 0.044937\n",
      "epoch: 3, avg loss: 6.353854, avg accuracy: 0.046410\n",
      "epoch: 4, avg loss: 6.194322, avg accuracy: 0.054326\n",
      "epoch: 5, avg loss: 6.122391, avg accuracy: 0.067359\n",
      "epoch: 6, avg loss: 6.000588, avg accuracy: 0.082808\n",
      "epoch: 7, avg loss: 5.978110, avg accuracy: 0.087615\n",
      "epoch: 8, avg loss: 5.963039, avg accuracy: 0.086300\n",
      "epoch: 9, avg loss: 5.949830, avg accuracy: 0.090680\n",
      "epoch: 10, avg loss: 5.933142, avg accuracy: 0.089480\n",
      "epoch: 11, avg loss: 5.913042, avg accuracy: 0.090779\n",
      "epoch: 12, avg loss: 5.910972, avg accuracy: 0.091650\n",
      "epoch: 13, avg loss: 5.897120, avg accuracy: 0.095094\n",
      "epoch: 14, avg loss: 5.865714, avg accuracy: 0.102273\n",
      "epoch: 15, avg loss: 5.853282, avg accuracy: 0.101984\n",
      "epoch: 16, avg loss: 5.828940, avg accuracy: 0.102335\n",
      "epoch: 17, avg loss: 5.814208, avg accuracy: 0.101593\n",
      "epoch: 18, avg loss: 5.786762, avg accuracy: 0.103044\n",
      "epoch: 19, avg loss: 5.774316, avg accuracy: 0.109873\n",
      "epoch: 20, avg loss: 5.728468, avg accuracy: 0.114907\n"
     ]
    }
   ],
   "source": [
    "# one pass per epoch: shuffle, run mini-batches, report running means.\n",
    "# batch_x / batch_y / predicted intentionally leak to the next cell.\n",
    "for ep in range(epoch):\n",
    "    total_loss, total_accuracy = 0, 0\n",
    "    X, Y = shuffle(X, Y)\n",
    "    for start in range(0, len(text_to), batch_size):\n",
    "        end = min(start + batch_size, len(text_to))\n",
    "        batch_x, seq_x = pad_sentence_batch(X[start: end], PAD, maxlen_answer)\n",
    "        batch_y, seq_y = pad_sentence_batch(Y[start: end], PAD, maxlen_answer)\n",
    "        fetches = [model.predicting_ids, model.accuracy, model.cost, model.optimizer]\n",
    "        predicted, accuracy, loss, _ = sess.run(\n",
    "            fetches, feed_dict = {model.X: batch_x, model.Y: batch_y}\n",
    "        )\n",
    "        total_loss += loss\n",
    "        total_accuracy += accuracy\n",
    "    n_batches = len(text_to) / batch_size\n",
    "    total_loss /= n_batches\n",
    "    total_accuracy /= n_batches\n",
    "    print('epoch: %d, avg loss: %f, avg accuracy: %f' % (ep + 1, total_loss, total_accuracy))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "row 1\n",
      "QUESTION: we restarted from our strengths , and at the same time we restarted from his strengths .\n",
      "REAL ANSWER: chúng tôi bắt đầu lại từ nghị lực , đồng thời , bắt đầu lại từ khả năng của bé .\n",
      "PREDICTED ANSWER: tôi tôi , , chúng tôi tôi , và , , tôi chúng , , tôi . . , . . và . , . . , . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n",
      "\n",
      "row 2\n",
      "QUESTION: but we didn &apos;t just fly it . we were flying at 100 meters above the top of the canopy to measure this molecule -- incredibly dangerous stuff .\n",
      "REAL ANSWER: chúng tôi không chỉ bay . chúng tôi bay cách tầng vòm của rừng 100 mét để đo đạc phân tử này -- chuyện vô cùng nguy hiểm .\n",
      "PREDICTED ANSWER: tôi tôi , tôi chúng tôi và , và , , tôi chúng . tôi tôi . . , . . chúng . , . . , . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n",
      "\n",
      "row 3\n",
      "QUESTION: so this stuff is just beginning .\n",
      "REAL ANSWER: vậy đây chỉ mới là sự bắt đầu .\n",
      "PREDICTED ANSWER: tôi tôi , tôi chúng tôi và , và . , tôi chúng . tôi và . . , . . chúng . , . . , . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n",
      "\n",
      "row 4\n",
      "QUESTION: when you have nanotechnology and nanoscience , what &apos;s occurred is that we &apos;re able to now look at atoms and molecules and actually control them for great benefits .\n",
      "REAL ANSWER: khi chúng ta có kĩ thuật vi phân tử và khoa học vi phân tử chúng ta có thể nhìn thấy nguyên tử và phân tử và có thể điều khiển chúng để đem lại nhiều lợi ích\n",
      "PREDICTED ANSWER: tôi tôi , tôi chúng , và , và . , tôi chúng . , và . . , . . chúng . , . . , . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# decode the last batch back to text, dropping special tokens 0-3\n",
    "def _decode(ids, rev_dict):\n",
    "    \"\"\"Space-join token ids via rev_dict, skipping PAD/GO/EOS/UNK.\"\"\"\n",
    "    return ' '.join(rev_dict[n] for n in ids if n not in (0, 1, 2, 3))\n",
    "\n",
    "for row in range(len(batch_x)):\n",
    "    print('row %d' % (row + 1))\n",
    "    print('QUESTION:', _decode(batch_x[row], rev_dictionary_from))\n",
    "    print('REAL ANSWER:', _decode(batch_y[row], rev_dictionary_to))\n",
    "    print('PREDICTED ANSWER:', _decode(predicted[row], rev_dictionary_to), '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
