{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "# Restrict TensorFlow to GPU 0 only; must be set before any TF session is created.\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '0'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from malaya.train.model.bigbird import modeling, utils"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters for a small BigBird encoder-decoder (seq2seq) model.\n",
    "# NOTE(review): these must match the bigbird-small-en-ms checkpoint restored\n",
    "# below, otherwise variable shapes will not line up on restore.\n",
    "bert_config = {\n",
    "    'attention_probs_dropout_prob': 0.1,\n",
    "    'hidden_act': 'gelu',\n",
    "    'hidden_dropout_prob': 0.1,\n",
    "    'hidden_size': 256,\n",
    "    'initializer_range': 0.02,\n",
    "    'intermediate_size': 1024,\n",
    "    'max_position_embeddings': 2048,\n",
    "    'max_encoder_length': 1024,  # inputs below are padded to this length\n",
    "    'max_decoder_length': 1024,\n",
    "    'num_attention_heads': 4,\n",
    "    'num_hidden_layers': 2,\n",
    "    'type_vocab_size': 2,\n",
    "    'scope': 'bert',\n",
    "    'use_bias': True,\n",
    "    'rescale_embedding': False,\n",
    "    'vocab_model_file': None,\n",
    "    'attention_type': 'block_sparse',  # BigBird sparse attention\n",
    "    'block_size': 16,\n",
    "    'num_rand_blocks': 3,\n",
    "    'vocab_size': 32000,\n",
    "    'couple_encoder_decoder': False,\n",
    "    'beam_size': 1,  # greedy decoding\n",
    "    'alpha': 0.0,\n",
    "    'label_smoothing': 0.1,\n",
    "    'norm_type': 'postnorm',\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sentencepiece as spm\n",
    "\n",
    "# SentencePiece vocabulary shared by source (EN) and target (MS) text.\n",
    "vocab = 'sp10m.cased.translation.model'\n",
    "sp = spm.SentencePieceProcessor()\n",
    "sp.Load(vocab)\n",
    "\n",
    "class Encoder:\n",
    "    \"\"\"Thin wrapper around a SentencePieceProcessor with a t2t-style API.\"\"\"\n",
    "\n",
    "    def __init__(self, sp):\n",
    "        self.sp = sp\n",
    "    \n",
    "    def encode(self, s):\n",
    "        # Token ids for `s`, with EOS id 1 appended.\n",
    "        # NOTE(review): several call sites below append another [1] on top of\n",
    "        # this -- confirm the double EOS is intentional before reusing.\n",
    "        return self.sp.EncodeAsIds(s) + [1]\n",
    "    \n",
    "    def decode(self, ids, strip_extraneous=False):\n",
    "        # `strip_extraneous` is accepted for t2t compatibility but ignored.\n",
    "        return self.sp.DecodeIds(list(ids))\n",
    "    \n",
    "encoder = Encoder(sp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the BigBird seq2seq graph constructor from the config above.\n",
    "model = modeling.TransformerModel(bert_config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encoder input token ids, shape (batch, seq_len); padded to 1024 downstream.\n",
    "X = tf.placeholder(tf.int32, [None, None])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/train/model/bigbird/modeling.py:226: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py:507: calling count_nonzero (from tensorflow.python.ops.math_ops) with axis is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "reduction_indices is deprecated, use axis instead\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py:507: calling count_nonzero (from tensorflow.python.ops.math_ops) with axis is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "reduction_indices is deprecated, use axis instead\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "((<tf.Tensor 'bert/log_probs:0' shape=(?, 1024) dtype=float32>,\n",
       "  <tf.Tensor 'bert/logits:0' shape=(?, 1024, 32000) dtype=float32>,\n",
       "  <tf.Tensor 'bert/while/Exit_1:0' shape=(?, 1024) dtype=int32>),\n",
       " <tf.Tensor 'bert/encoder/layer_1/output/LayerNorm/batchnorm/add_1:0' shape=(?, 1024, 256) dtype=float32>)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# r = ((log_probs, logits, predicted_ids), encoder_output) per the repr below.\n",
    "r = model(X, training = False)\n",
    "r"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'logits:0' shape=(?, 1024) dtype=int32>"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): r[0][2] holds decoded token ids (int32), not logits; the\n",
    "# name 'logits' is kept because the frozen-graph consumers below expect it.\n",
    "logits = tf.identity(r[0][2], name = 'logits')\n",
    "logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'bigbird-small-en-ms/model.ckpt-500000'"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Most recent pretrained checkpoint to load weights from.\n",
    "ckpt_path = tf.train.latest_checkpoint('bigbird-small-en-ms')\n",
    "ckpt_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "sess = tf.InteractiveSession()\n",
    "# Initialize first so any variable missing from the checkpoint still has a value.\n",
    "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from bigbird-small-en-ms/model.ckpt-500000\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from bigbird-small-en-ms/model.ckpt-500000\n"
     ]
    }
   ],
   "source": [
    "# Overwrite the freshly initialized weights with the pretrained checkpoint.\n",
    "saver = tf.train.Saver()\n",
    "saver.restore(sess, ckpt_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shorthand for Keras' padding helper used before every sess.run below.\n",
    "pad_sequences = tf.keras.preprocessing.sequence.pad_sequences"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "from unidecode import unidecode\n",
    "\n",
    "def cleaning(string):\n",
    "    \"\"\"Flatten newlines, transliterate to ASCII and collapse runs of spaces.\"\"\"\n",
    "    flattened = string.replace('\\n', ' ')\n",
    "    ascii_only = unidecode(flattened)\n",
    "    single_spaced = re.sub(r'[ ]+', ' ', ascii_only)\n",
    "    return single_spaced.strip()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Amongst the wide-ranging initiatives proposed are a sustainable food labelling framework, a reformulation of processed foods, and a sustainability chapter in all EU bilateral trade agreements. The EU also plans to publish a proposal for a legislative framework for sustainable food systems by 2023 to ensure all foods on the EU market become increasingly sustainable.'"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sample EN paragraph used for a smoke-test translation below.\n",
    "string = \"\"\"\n",
    "Amongst the wide-ranging initiatives proposed are a sustainable food labelling framework, a reformulation of processed foods, and a sustainability chapter in all EU bilateral trade agreements. The EU also plans to publish a proposal for a legislative framework for sustainable food systems by 2023 to ensure all foods on the EU market become increasingly sustainable.\n",
    "\"\"\"\n",
    "cleaning(string)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): Encoder.encode() already appends EOS id 1, so this appends a\n",
    "# second EOS; confirm the model was exported/tested this way before changing.\n",
    "encoded = encoder.encode(f'{cleaning(string)}') + [1]\n",
    "# Right-pad with 0 up to the fixed encoder length expected by the graph.\n",
    "s = pad_sequences([encoded], padding='post', maxlen = 1024)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 989 ms, sys: 119 ms, total: 1.11 s\n",
      "Wall time: 1.02 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# Greedy-decode the sample; r[0][2] are the predicted target token ids.\n",
    "l = sess.run(r[0][2], feed_dict = {X: s})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Antara inisiatif penyusunan luas yang dicadangkan adalah kerangka pelabelan makanan lestari, reformasi makanan diproses, dan bab kelestarian dalam semua perjanjian perdagangan dua hala EU. EU juga merancang untuk menerbitkan cadangan untuk kerangka perundangan untuk sistem makanan lestari menjelang 2023 untuk memastikan semua makanan di pasaran EU semakin mampan.'"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Drop pad id 0 before detokenizing; EOS id 1 is kept -- presumably\n",
    "# SentencePiece renders control ids as nothing, verify if output looks off.\n",
    "encoder.decode([i for i in l[0].tolist() if i > 0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !wget https://f000.backblazeb2.com/file/malay-dataset/test-en-ms.tar.gz\n",
    "# !tar -zxf test-en-ms.tar.gz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(77707, 77707)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "batch_size = 24\n",
    "\n",
    "# EN->MS test set: left.txt holds English sources, right.txt Malay references.\n",
    "path = 'test-en'\n",
    "\n",
    "with open(os.path.join(path, 'left.txt')) as fopen:\n",
    "    left = fopen.read().split('\\n')\n",
    "    \n",
    "with open(os.path.join(path, 'right.txt')) as fopen:\n",
    "    right = fopen.read().split('\\n')\n",
    "    \n",
    "# NOTE(review): split('\\n') keeps a trailing empty string if a file ends with\n",
    "# a newline; the matching counts below suggest both files align regardless.\n",
    "len(left), len(right)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 2.16 ms, sys: 0 ns, total: 2.16 ms\n",
      "Wall time: 2.09 ms\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "# NOTE(review): encode() already appends EOS id 1, so [1] here is a second EOS.\n",
    "encoded = encoder.encode(left[0]) + [1]\n",
    "s = pad_sequences([encoded], padding='post', maxlen = 1024)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1.83 s, sys: 122 ms, total: 1.95 s\n",
      "Wall time: 1.65 s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[[2439,\n",
       "  1676,\n",
       "  1663,\n",
       "  3568,\n",
       "  47,\n",
       "  436,\n",
       "  9,\n",
       "  3045,\n",
       "  103,\n",
       "  26,\n",
       "  19928,\n",
       "  15084,\n",
       "  20,\n",
       "  1596,\n",
       "  13,\n",
       "  23102,\n",
       "  30540,\n",
       "  3853,\n",
       "  841,\n",
       "  10954,\n",
       "  33,\n",
       "  879,\n",
       "  81,\n",
       "  9,\n",
       "  863,\n",
       "  4237,\n",
       "  3925,\n",
       "  20,\n",
       "  12195,\n",
       "  20,\n",
       "  22720,\n",
       "  20,\n",
       "  12195,\n",
       "  207,\n",
       "  27,\n",
       "  1478,\n",
       "  14236,\n",
       "  27,\n",
       "  2190,\n",
       "  31,\n",
       "  163,\n",
       "  107,\n",
       "  194,\n",
       "  133,\n",
       "  19975,\n",
       "  9,\n",
       "  7391,\n",
       "  20,\n",
       "  81,\n",
       "  127,\n",
       "  1058,\n",
       "  3459,\n",
       "  879,\n",
       "  1356,\n",
       "  2010,\n",
       "  4711,\n",
       "  13,\n",
       "  791,\n",
       "  28,\n",
       "  130,\n",
       "  6268,\n",
       "  19,\n",
       "  221,\n",
       "  5798,\n",
       "  1001,\n",
       "  20,\n",
       "  262,\n",
       "  5350,\n",
       "  26,\n",
       "  25676,\n",
       "  2345,\n",
       "  3754,\n",
       "  20,\n",
       "  107,\n",
       "  26,\n",
       "  2345,\n",
       "  753,\n",
       "  6957,\n",
       "  20,\n",
       "  2345,\n",
       "  2588,\n",
       "  50,\n",
       "  30941,\n",
       "  7397,\n",
       "  27,\n",
       "  2345,\n",
       "  10860,\n",
       "  37,\n",
       "  4603,\n",
       "  8431,\n",
       "  5662,\n",
       "  26,\n",
       "  12265,\n",
       "  39,\n",
       "  614,\n",
       "  368,\n",
       "  43,\n",
       "  233,\n",
       "  1093,\n",
       "  31,\n",
       "  118,\n",
       "  15767,\n",
       "  25946,\n",
       "  529,\n",
       "  1431,\n",
       "  154,\n",
       "  19,\n",
       "  6892,\n",
       "  13,\n",
       "  791,\n",
       "  28,\n",
       "  259,\n",
       "  1196,\n",
       "  3203,\n",
       "  162,\n",
       "  891,\n",
       "  20,\n",
       "  1185,\n",
       "  192,\n",
       "  13434,\n",
       "  1679,\n",
       "  5662,\n",
       "  26,\n",
       "  54,\n",
       "  127,\n",
       "  17142,\n",
       "  9,\n",
       "  206,\n",
       "  442,\n",
       "  2168,\n",
       "  1616,\n",
       "  367,\n",
       "  177,\n",
       "  17474,\n",
       "  158,\n",
       "  27253,\n",
       "  20,\n",
       "  26,\n",
       "  1940,\n",
       "  16718,\n",
       "  5662,\n",
       "  2684,\n",
       "  27,\n",
       "  2356,\n",
       "  31,\n",
       "  22008,\n",
       "  4775,\n",
       "  10036,\n",
       "  14867,\n",
       "  2094,\n",
       "  36,\n",
       "  20,\n",
       "  177,\n",
       "  751,\n",
       "  3396,\n",
       "  240,\n",
       "  407,\n",
       "  29288,\n",
       "  33,\n",
       "  24376,\n",
       "  67,\n",
       "  10301,\n",
       "  144,\n",
       "  27,\n",
       "  8787,\n",
       "  654,\n",
       "  58,\n",
       "  5752,\n",
       "  530,\n",
       "  27,\n",
       "  3016,\n",
       "  16638,\n",
       "  769,\n",
       "  1921,\n",
       "  13,\n",
       "  3671,\n",
       "  39,\n",
       "  3350,\n",
       "  21547,\n",
       "  77,\n",
       "  5545,\n",
       "  9,\n",
       "  14641,\n",
       "  192,\n",
       "  544,\n",
       "  4560,\n",
       "  5350,\n",
       "  26,\n",
       "  194,\n",
       "  561]]"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "# Decode one batch and strip pad (0) and EOS (1) ids from each row.\n",
    "p = sess.run(logits, feed_dict = {X: s}).tolist()\n",
    "results = []\n",
    "for row in p:\n",
    "    results.append([i for i in row if i not in [0, 1]])\n",
    "results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.51621497"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from tensor2tensor.utils import bleu_hook\n",
    "# NOTE(review): the reference keeps the EOS id 1 appended by encode() while the\n",
    "# translations had 0/1 stripped above -- confirm this asymmetry is intended.\n",
    "bleu_hook.compute_bleu(reference_corpus = [encoder.encode(right[0])], \n",
    "                       translation_corpus = results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 3238/3238 [1:32:58<00:00,  1.72s/it]\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "# Translate the whole test set batch-by-batch, stripping pad (0) and EOS (1)\n",
    "# ids from every decoded row.\n",
    "results = []\n",
    "for start in tqdm(range(0, len(left), batch_size)):\n",
    "    stop = min(start + batch_size, len(left))\n",
    "    batch = left[start: stop]\n",
    "    encoded = [encoder.encode(sent) + [1] for sent in batch]\n",
    "    batch_x = pad_sequences(encoded, padding='post', maxlen = 1024)\n",
    "\n",
    "    p = sess.run(logits, feed_dict = {X: batch_x}).tolist()\n",
    "    results.extend([t for t in row if t not in [0, 1]] for row in p)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.20803314"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Encode references for the full set. NOTE(review): encode() appends EOS id 1\n",
    "# while the translations above had it stripped -- confirm before comparing.\n",
    "rights = [encoder.encode(r) for r in right[:len(results)]]\n",
    "bleu_hook.compute_bleu(reference_corpus = rights,\n",
    "                       translation_corpus = results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'output/model.ckpt'"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-save only trainable variables so the export checkpoint stays minimal.\n",
    "saver = tf.train.Saver(tf.trainable_variables())\n",
    "saver.save(sess, 'output/model.ckpt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['bert/embeddings/word_embeddings',\n",
       " 'bert/embeddings/position_embeddings',\n",
       " 'Placeholder',\n",
       " 'bert/encoder/LayerNorm/gamma',\n",
       " 'bert/encoder/layer_0/attention/self/query/kernel',\n",
       " 'bert/encoder/layer_0/attention/self/query/bias',\n",
       " 'bert/encoder/layer_0/attention/self/key/kernel',\n",
       " 'bert/encoder/layer_0/attention/self/key/bias',\n",
       " 'bert/encoder/layer_0/attention/self/value/kernel',\n",
       " 'bert/encoder/layer_0/attention/self/value/bias',\n",
       " 'bert/encoder/layer_0/attention/self/Softmax',\n",
       " 'bert/encoder/layer_0/attention/self/Softmax_1',\n",
       " 'bert/encoder/layer_0/attention/self/Softmax_2',\n",
       " 'bert/encoder/layer_0/attention/self/Softmax_3',\n",
       " 'bert/encoder/layer_0/attention/self/Softmax_4',\n",
       " 'bert/encoder/layer_0/attention/output/dense/kernel',\n",
       " 'bert/encoder/layer_0/attention/output/dense/bias',\n",
       " 'bert/encoder/layer_0/attention/output/LayerNorm/gamma',\n",
       " 'bert/encoder/layer_0/intermediate/dense/kernel',\n",
       " 'bert/encoder/layer_0/intermediate/dense/bias',\n",
       " 'bert/encoder/layer_0/output/dense/kernel',\n",
       " 'bert/encoder/layer_0/output/dense/bias',\n",
       " 'bert/encoder/layer_0/output/LayerNorm/gamma',\n",
       " 'bert/encoder/layer_1/attention/self/query/kernel',\n",
       " 'bert/encoder/layer_1/attention/self/query/bias',\n",
       " 'bert/encoder/layer_1/attention/self/key/kernel',\n",
       " 'bert/encoder/layer_1/attention/self/key/bias',\n",
       " 'bert/encoder/layer_1/attention/self/value/kernel',\n",
       " 'bert/encoder/layer_1/attention/self/value/bias',\n",
       " 'bert/encoder/layer_1/attention/self/Softmax',\n",
       " 'bert/encoder/layer_1/attention/self/Softmax_1',\n",
       " 'bert/encoder/layer_1/attention/self/Softmax_2',\n",
       " 'bert/encoder/layer_1/attention/self/Softmax_3',\n",
       " 'bert/encoder/layer_1/attention/self/Softmax_4',\n",
       " 'bert/encoder/layer_1/attention/output/dense/kernel',\n",
       " 'bert/encoder/layer_1/attention/output/dense/bias',\n",
       " 'bert/encoder/layer_1/attention/output/LayerNorm/gamma',\n",
       " 'bert/encoder/layer_1/intermediate/dense/kernel',\n",
       " 'bert/encoder/layer_1/intermediate/dense/bias',\n",
       " 'bert/encoder/layer_1/output/dense/kernel',\n",
       " 'bert/encoder/layer_1/output/dense/bias',\n",
       " 'bert/encoder/layer_1/output/LayerNorm/gamma',\n",
       " 'bert/decoder/LayerNorm/gamma',\n",
       " 'bert/decoder/layer_0/attention/self/query/kernel',\n",
       " 'bert/decoder/layer_0/attention/self/query/bias',\n",
       " 'bert/decoder/layer_0/attention/self/key/kernel',\n",
       " 'bert/decoder/layer_0/attention/self/key/bias',\n",
       " 'bert/decoder/layer_0/attention/self/value/kernel',\n",
       " 'bert/decoder/layer_0/attention/self/value/bias',\n",
       " 'bert/while/decoder/layer_0/attention/self/Softmax',\n",
       " 'bert/decoder/layer_0/attention/output/dense/kernel',\n",
       " 'bert/decoder/layer_0/attention/output/dense/bias',\n",
       " 'bert/decoder/layer_0/attention/output/LayerNorm/gamma',\n",
       " 'bert/decoder/layer_0/attention/encdec/query/kernel',\n",
       " 'bert/decoder/layer_0/attention/encdec/query/bias',\n",
       " 'bert/decoder/layer_0/attention/encdec/key/kernel',\n",
       " 'bert/decoder/layer_0/attention/encdec/key/bias',\n",
       " 'bert/decoder/layer_0/attention/encdec/value/kernel',\n",
       " 'bert/decoder/layer_0/attention/encdec/value/bias',\n",
       " 'bert/decoder/layer_0/attention/encdec_output/dense/kernel',\n",
       " 'bert/decoder/layer_0/attention/encdec_output/dense/bias',\n",
       " 'bert/decoder/layer_0/attention/encdec_output/LayerNorm/gamma',\n",
       " 'bert/decoder/layer_0/intermediate/dense/kernel',\n",
       " 'bert/decoder/layer_0/intermediate/dense/bias',\n",
       " 'bert/decoder/layer_0/output/dense/kernel',\n",
       " 'bert/decoder/layer_0/output/dense/bias',\n",
       " 'bert/decoder/layer_0/output/LayerNorm/gamma',\n",
       " 'bert/decoder/layer_1/attention/self/query/kernel',\n",
       " 'bert/decoder/layer_1/attention/self/query/bias',\n",
       " 'bert/decoder/layer_1/attention/self/key/kernel',\n",
       " 'bert/decoder/layer_1/attention/self/key/bias',\n",
       " 'bert/decoder/layer_1/attention/self/value/kernel',\n",
       " 'bert/decoder/layer_1/attention/self/value/bias',\n",
       " 'bert/while/decoder/layer_1/attention/self/Softmax',\n",
       " 'bert/decoder/layer_1/attention/output/dense/kernel',\n",
       " 'bert/decoder/layer_1/attention/output/dense/bias',\n",
       " 'bert/decoder/layer_1/attention/output/LayerNorm/gamma',\n",
       " 'bert/decoder/layer_1/attention/encdec/query/kernel',\n",
       " 'bert/decoder/layer_1/attention/encdec/query/bias',\n",
       " 'bert/decoder/layer_1/attention/encdec/key/kernel',\n",
       " 'bert/decoder/layer_1/attention/encdec/key/bias',\n",
       " 'bert/decoder/layer_1/attention/encdec/value/kernel',\n",
       " 'bert/decoder/layer_1/attention/encdec/value/bias',\n",
       " 'bert/decoder/layer_1/attention/encdec_output/dense/kernel',\n",
       " 'bert/decoder/layer_1/attention/encdec_output/dense/bias',\n",
       " 'bert/decoder/layer_1/attention/encdec_output/LayerNorm/gamma',\n",
       " 'bert/decoder/layer_1/intermediate/dense/kernel',\n",
       " 'bert/decoder/layer_1/intermediate/dense/bias',\n",
       " 'bert/decoder/layer_1/output/dense/kernel',\n",
       " 'bert/decoder/layer_1/output/dense/bias',\n",
       " 'bert/decoder/layer_1/output/LayerNorm/gamma',\n",
       " 'bert/while/decoder/layer_0/attention/self/Softmax_1',\n",
       " 'bert/while/decoder/layer_1/attention/self/Softmax_1',\n",
       " 'bert/logits',\n",
       " 'logits']"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Collect the node names to keep when freezing: variables, the input\n",
    "# placeholder, the logits outputs and attention softmaxes -- excluding\n",
    "# optimizer state (adam/beta/global_step/gradients).\n",
    "# FIX: build the list directly instead of joining into a comma-separated\n",
    "# string and immediately splitting it back (the old roundtrip was redundant\n",
    "# and would corrupt any node name containing a comma).\n",
    "node_names = [\n",
    "    n.name\n",
    "    for n in tf.get_default_graph().as_graph_def().node\n",
    "    if ('Variable' in n.op\n",
    "    or 'Placeholder' in n.name\n",
    "    or 'logits' in n.name\n",
    "    or 'alphas' in n.name\n",
    "    or 'self/Softmax' in n.name)\n",
    "    and 'adam' not in n.name\n",
    "    and 'beta' not in n.name\n",
    "    and 'global_step' not in n.name\n",
    "    and 'gradients' not in n.name\n",
    "]\n",
    "# freeze_graph below still expects one comma-separated string of node names.\n",
    "strings = ','.join(node_names)\n",
    "node_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "def freeze_graph(model_dir, output_node_names):\n",
    "    \"\"\"Freeze the latest checkpoint in `model_dir` into model_dir/frozen_model.pb.\n",
    "\n",
    "    model_dir: directory holding a TF checkpoint (as written by saver.save).\n",
    "    output_node_names: comma-separated node names that must survive freezing.\n",
    "    Raises AssertionError when `model_dir` does not exist.\n",
    "    \"\"\"\n",
    "\n",
    "    if not tf.gfile.Exists(model_dir):\n",
    "        raise AssertionError(\n",
    "            \"Export directory doesn't exists. Please specify an export \"\n",
    "            'directory: %s' % model_dir\n",
    "        )\n",
    "\n",
    "    checkpoint = tf.train.get_checkpoint_state(model_dir)\n",
    "    input_checkpoint = checkpoint.model_checkpoint_path\n",
    "\n",
    "    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])\n",
    "    output_graph = absolute_model_dir + '/frozen_model.pb'\n",
    "    # Strip device placements so the frozen graph loads on any machine.\n",
    "    clear_devices = True\n",
    "    with tf.Session(graph = tf.Graph()) as sess:\n",
    "        saver = tf.train.import_meta_graph(\n",
    "            input_checkpoint + '.meta', clear_devices = clear_devices\n",
    "        )\n",
    "        saver.restore(sess, input_checkpoint)\n",
    "        # Replace variables with constants and drop everything not reachable\n",
    "        # from the requested output nodes.\n",
    "        output_graph_def = tf.graph_util.convert_variables_to_constants(\n",
    "            sess,\n",
    "            tf.get_default_graph().as_graph_def(),\n",
    "            output_node_names.split(','),\n",
    "        )\n",
    "        with tf.gfile.GFile(output_graph, 'wb') as f:\n",
    "            f.write(output_graph_def.SerializeToString())\n",
    "        print('%d ops in the final graph.' % len(output_graph_def.node))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from output/model.ckpt\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from output/model.ckpt\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-28-9a7215a4e58a>:23: convert_variables_to_constants (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.convert_variables_to_constants`\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-28-9a7215a4e58a>:23: convert_variables_to_constants (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.convert_variables_to_constants`\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/framework/graph_util_impl.py:277: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.extract_sub_graph`\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/framework/graph_util_impl.py:277: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.extract_sub_graph`\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Froze 90 variables.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Froze 90 variables.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Converted 90 variables to const ops.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Converted 90 variables to const ops.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4602 ops in the final graph.\n"
     ]
    }
   ],
   "source": [
    "freeze_graph('output', strings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tensorflow.tools.graph_transforms import TransformGraph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Graph-transform pipeline applied to the frozen graph: fold batch norms,\n",
    "# quantize weights to 8-bit (fallback range [-10, 10] for tensors without a\n",
    "# recorded min/max), then strip unused nodes and re-sort topologically.\n",
    "transforms = ['add_default_attributes',\n",
    "             'remove_nodes(op=Identity, op=CheckNumerics, op=Dropout)',\n",
    "             'fold_batch_norms',\n",
    "             'fold_old_batch_norms',\n",
    "             'quantize_weights(fallback_min=-10, fallback_max=10)',\n",
    "             'strip_unused_nodes',\n",
    "             'sort_by_execution_order']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-32-ecde0d9c84a9>:4: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.gfile.GFile.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-32-ecde0d9c84a9>:4: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.gfile.GFile.\n"
     ]
    }
   ],
   "source": [
    "pb = 'output/frozen_model.pb'\n",
    "\n",
    "input_graph_def = tf.GraphDef()\n",
    "# FIX: tf.gfile.FastGFile is deprecated (see the warning in this cell's\n",
    "# output); tf.gfile.GFile is the supported API and behaves identically here.\n",
    "with tf.gfile.GFile(pb, 'rb') as f:\n",
    "    input_graph_def.ParseFromString(f.read())\n",
    "\n",
    "# Apply the transform pipeline from Placeholder -> logits and write the\n",
    "# quantized graph next to the original.\n",
    "inputs = ['Placeholder']\n",
    "transformed_graph_def = TransformGraph(input_graph_def, \n",
    "                                       inputs,\n",
    "                                       ['logits'], transforms)\n",
    "\n",
    "with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:\n",
    "    f.write(transformed_graph_def.SerializeToString())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_graph(frozen_graph_filename, **kwargs):\n",
    "    \"\"\"Load a frozen GraphDef (.pb) into a fresh tf.Graph and return it.\n",
    "\n",
    "    Stateful training ops (RefSwitch / AssignSub / AssignAdd / Assign) are\n",
    "    rewritten into stateless equivalents so frozen graphs exported from\n",
    "    training code still import cleanly.\n",
    "    \"\"\"\n",
    "    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:\n",
    "        graph_def = tf.GraphDef()\n",
    "        graph_def.ParseFromString(f.read())\n",
    "\n",
    "    # https://github.com/onnx/tensorflow-onnx/issues/77#issuecomment-445066091\n",
    "    # to fix import T5\n",
    "    for node in graph_def.node:\n",
    "        if node.op == 'RefSwitch':\n",
    "            node.op = 'Switch'\n",
    "            # BUG FIX: `xrange` is Python 2 only and raised NameError whenever\n",
    "            # a RefSwitch node was present on Python 3; `range` is correct.\n",
    "            for index in range(len(node.input)):\n",
    "                if 'moving_' in node.input[index]:\n",
    "                    node.input[index] = node.input[index] + '/read'\n",
    "        elif node.op == 'AssignSub':\n",
    "            node.op = 'Sub'\n",
    "            if 'use_locking' in node.attr:\n",
    "                del node.attr['use_locking']\n",
    "        elif node.op == 'AssignAdd':\n",
    "            node.op = 'Add'\n",
    "            if 'use_locking' in node.attr:\n",
    "                del node.attr['use_locking']\n",
    "        elif node.op == 'Assign':\n",
    "            node.op = 'Identity'\n",
    "            if 'use_locking' in node.attr:\n",
    "                del node.attr['use_locking']\n",
    "            if 'validate_shape' in node.attr:\n",
    "                del node.attr['validate_shape']\n",
    "            if len(node.input) == 2:\n",
    "                # Assign(ref, value) -> Identity(value)\n",
    "                node.input[0] = node.input[1]\n",
    "                del node.input[1]\n",
    "\n",
    "    with tf.Graph().as_default() as graph:\n",
    "        tf.import_graph_def(graph_def)\n",
    "    return graph\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check the un-quantized frozen graph in a separate session.\n",
    "g = load_graph('output/frozen_model.pb')\n",
    "x = g.get_tensor_by_name('import/Placeholder:0')\n",
    "logits = g.get_tensor_by_name('import/logits:0')\n",
    "test_sess = tf.InteractiveSession(graph = g)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1.95 s, sys: 99.8 ms, total: 2.05 s\n",
      "Wall time: 1.76 s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'Anda tahu bagaimana ceritanya berlaku. Dua orang yang heteroseksual, hampir-hampir berkulit putih sedang berjuang dalam kehidupan mereka. Mereka saling bertemu, berkumpul, berpisah, berkumpul lagi dan mencari kebahagiaan dan menyelesaikan di antara satu sama lain lengan. Akhirnya, mereka dapat mula menjalani kehidupan biasa! Rom-coms boleh menikmati escapism, tetapi kisah yang diceritakan terlalu kerap, satu yang terlalu kurang kepelbagaian, terlalu bergantung pada stereotaip jantina dan terlalu prihatin dengan menjual jenama cinta yang mustahil untuk hidup hingga: kajian 2008 di Heriot Watt University mendapati bahawa rom-coms memberi kesan negatif terhadap hubungan, menjadikan kita mengejar standard cinta yang tidak dapat dielakkan. Dalam proses penulisan drama baru saya Ross & Rachel, yang menghadapi mitos cinta moden dan terbuka di Edinburgh Festival Fringe Ogos ini, saya harus berfikir banyak mengenai percintaan dalam fiksyen dari Romeo dan Juliet ke Pride dan Prejudice benar-benar untuk Memberitakan Hill. Mengapa kita terus memberitahu kisah yang sama?'"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "%%time\n",
    "l = test_sess.run(logits, feed_dict = {x: s})\n",
    "encoder.decode([i for i in l[0].tolist() if i > 0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "g = load_graph('output/frozen_model.pb.quantized')\n",
    "x = g.get_tensor_by_name('import/Placeholder:0')\n",
    "logits = g.get_tensor_by_name('import/logits:0')\n",
    "test_sess = tf.InteractiveSession(graph = g)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 2.34 s, sys: 182 ms, total: 2.52 s\n",
      "Wall time: 2.01 s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'Anda tahu bagaimana ceritanya. Dua orang heteroseksual, hampir-hampir berkulit putih sedang berjuang dalam kehidupan mereka. Mereka saling bertemu, berkumpul, berpisah, berkumpul lagi dan mencari kebahagiaan dan menyelesaikan di antara satu sama lain. Akhirnya, mereka dapat mula menjalani kehidupan biasa! Rom-coms boleh menikmati escapism, tetapi kisah yang diceritakan terlalu kerap, satu yang terlalu kurang kepelbagaian, terlalu bergantung pada stereotaip jantina dan terlalu prihatin dengan menjual jenama cinta yang mustahil untuk hidup hingga: kajian 2008 di Heriot Watt University mendapati bahawa rom-coms memberi kesan negatif terhadap hubungan, menjadikan kita mengejar standard cinta yang tidak dapat dielakkan. Dalam proses penulisan drama baru saya Ross & Rachel, yang menghadapi mitos cinta moden dan terbuka di Edinburgh Festival Fringe Ogos ini, saya harus memikirkan banyak percintaan dalam fiksyen dari Romeo dan Juliet ke Pride dan Prejudice benar-benar untuk Memberitakan Hill. Mengapa kita terus memberitahu kisah yang sama?'"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "%%time\n",
    "l = test_sess.run(logits, feed_dict = {x: s})\n",
    "encoder.decode([i for i in l[0].tolist() if i > 0])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
