{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '3'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_gan/python/estimator/tpu_gan_estimator.py:42: The name tf.estimator.tpu.TPUEstimator is deprecated. Please use tf.compat.v1.estimator.tpu.TPUEstimator instead.\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_gan/python/estimator/tpu_gan_estimator.py:42: The name tf.estimator.tpu.TPUEstimator is deprecated. Please use tf.compat.v1.estimator.tpu.TPUEstimator instead.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from tensor2tensor.data_generators import problem\n",
    "from tensor2tensor.data_generators import text_problems\n",
    "from tensor2tensor.data_generators import translate\n",
    "from tensor2tensor.layers import common_attention\n",
    "from tensor2tensor.utils import registry\n",
    "from tensor2tensor import problems\n",
    "import tensorflow as tf\n",
    "import os\n",
    "import logging\n",
    "import sentencepiece as spm\n",
    "import transformer_tag\n",
    "from tensor2tensor.layers import modalities"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "vocab = 'sp10m.cased.t5.model'\n",
     "sp = spm.SentencePieceProcessor()\n",
     "sp.Load(vocab)\n",
     "\n",
     "class Encoder:\n",
     "    \"\"\"t2t-style text encoder/decoder backed by a SentencePiece model.\"\"\"\n",
     "\n",
     "    def __init__(self, sp):\n",
     "        self.sp = sp\n",
     "        # +100 extra ids on top of the SentencePiece vocab -- presumably\n",
     "        # reserved ids expected by the T2T problem; TODO confirm against\n",
     "        # the training config.\n",
     "        self.vocab_size = sp.GetPieceSize() + 100\n",
     "\n",
     "    def encode(self, s):\n",
     "        # Text -> list of subword ids.\n",
     "        return self.sp.EncodeAsIds(s)\n",
     "\n",
     "    def decode(self, ids, strip_extraneous = False):\n",
     "        # Ids -> text. `strip_extraneous` is accepted for t2t encoder\n",
     "        # API compatibility but ignored here.\n",
     "        return self.sp.DecodeIds(list(ids))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "d = [\n",
    "    {'class': 0, 'Description': 'PAD', 'salah': '', 'betul': ''},\n",
    "    {\n",
    "        'class': 1,\n",
    "        'Description': 'kesambungan subwords',\n",
    "        'salah': '',\n",
    "        'betul': '',\n",
    "    },\n",
    "    {\n",
    "        'class': 2,\n",
    "        'Description': 'tiada kesalahan',\n",
    "        'salah': '',\n",
    "        'betul': '',\n",
    "    },\n",
    "    {\n",
    "        'class': 3,\n",
    "        'Description': 'kesalahan frasa nama, Perkara yang diterangkan mesti mendahului \"penerang\"',\n",
    "        'salah': 'Cili sos',\n",
    "        'betul': 'sos cili',\n",
    "    },\n",
    "    {\n",
    "        'class': 4,\n",
    "        'Description': 'kesalahan kata jamak',\n",
    "        'salah': 'mereka-mereka',\n",
    "        'betul': 'mereka',\n",
    "    },\n",
    "    {\n",
    "        'class': 5,\n",
    "        'Description': 'kesalahan kata penguat',\n",
    "        'salah': 'sangat tinggi sekali',\n",
    "        'betul': 'sangat tinggi',\n",
    "    },\n",
    "    {\n",
    "        'class': 6,\n",
    "        'Description': 'kata adjektif dan imbuhan \"ter\" tanpa penguat.',\n",
    "        'salah': 'Sani mendapat markah yang tertinggi sekali.',\n",
    "        'betul': 'Sani mendapat markah yang tertinggi.',\n",
    "    },\n",
    "    {\n",
    "        'class': 7,\n",
    "        'Description': 'kesalahan kata hubung',\n",
    "        'salah': 'Sally sedang membaca bila saya tiba di rumahnya.',\n",
    "        'betul': 'Sally sedang membaca apabila saya tiba di rumahnya.',\n",
    "    },\n",
    "    {\n",
    "        'class': 8,\n",
    "        'Description': 'kesalahan kata bilangan',\n",
    "        'salah': 'Beribu peniaga tidak membayar cukai pendapatan.',\n",
    "        'betul': 'Beribu-ribu peniaga tidak membayar cukai pendapatan',\n",
    "    },\n",
    "    {\n",
    "        'class': 9,\n",
    "        'Description': 'kesalahan kata sendi',\n",
    "        'salah': 'Umar telah berpindah daripada sekolah ini bulan lalu.',\n",
    "        'betul': 'Umar telah berpindah dari sekolah ini bulan lalu.',\n",
    "    },\n",
    "    {\n",
    "        'class': 10,\n",
    "        'Description': 'kesalahan penjodoh bilangan',\n",
    "        'salah': 'Setiap orang pelajar',\n",
    "        'betul': 'Setiap pelajar.',\n",
    "    },\n",
    "    {\n",
    "        'class': 11,\n",
    "        'Description': 'kesalahan kata ganti diri',\n",
    "        'salah': 'Pencuri itu telah ditangkap. Beliau dibawa ke balai polis.',\n",
    "        'betul': 'Pencuri itu telah ditangkap. Dia dibawa ke balai polis.',\n",
    "    },\n",
    "    {\n",
    "        'class': 12,\n",
    "        'Description': 'kesalahan ayat pasif',\n",
    "        'salah': 'Cerpen itu telah dikarang oleh saya.',\n",
    "        'betul': 'Cerpen itu telah saya karang.',\n",
    "    },\n",
    "    {\n",
    "        'class': 13,\n",
    "        'Description': 'kesalahan kata tanya',\n",
    "        'salah': 'Kamu berasal dari manakah ?',\n",
    "        'betul': 'Kamu berasal dari mana ?',\n",
    "    },\n",
    "    {\n",
    "        'class': 14,\n",
    "        'Description': 'kesalahan tanda baca',\n",
    "        'salah': 'Kamu berasal dari manakah .',\n",
    "        'betul': 'Kamu berasal dari mana ?',\n",
    "    },\n",
    "    {\n",
    "        'class': 15,\n",
    "        'Description': 'kesalahan kata kerja tak transitif',\n",
    "        'salah': 'Dia kata kepada saya',\n",
    "        'betul': 'Dia berkata kepada saya',\n",
    "    },\n",
    "    {\n",
    "        'class': 16,\n",
    "        'Description': 'kesalahan kata kerja transitif',\n",
    "        'salah': 'Dia suka baca buku',\n",
    "        'betul': 'Dia suka membaca buku',\n",
    "    },\n",
    "    {\n",
    "        'class': 17,\n",
    "        'Description': 'penggunaan kata yang tidak tepat',\n",
    "        'salah': 'Tembuk Besar negeri Cina dibina oleh Shih Huang Ti.',\n",
    "        'betul': 'Tembok Besar negeri Cina dibina oleh Shih Huang Ti',\n",
    "    },\n",
    "]\n",
    "\n",
    "\n",
     "class Tatabahasa:\n",
     "    \"\"\"Maps error-tag descriptions to integer class ids and back.\"\"\"\n",
     "\n",
     "    def __init__(self, d):\n",
     "        self.d = d\n",
     "        # Description -> index; enumerate order matches the 'class'\n",
     "        # field of each entry in `d` (0..17).\n",
     "        self.kesalahan = {i['Description']: no for no, i in enumerate(self.d)}\n",
     "        self.reverse_kesalahan = {v: k for k, v in self.kesalahan.items()}\n",
     "        self.vocab_size = len(self.d)\n",
     "\n",
     "    def encode(self, s):\n",
     "        # Sequence of descriptions -> list of class ids.\n",
     "        return [self.kesalahan[i] for i in s]\n",
     "\n",
     "    def decode(self, ids, strip_extraneous = False):\n",
     "        # Class ids -> descriptions. `strip_extraneous` is accepted for\n",
     "        # t2t encoder API compatibility but ignored here.\n",
     "        return [self.reverse_kesalahan[i] for i in ids]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "@registry.register_problem\n",
     "class Grammar(text_problems.Text2TextProblem):\n",
     "    \"\"\"grammatical error correction.\"\"\"\n",
     "\n",
     "    def feature_encoders(self, data_dir):\n",
     "        # Shared subword encoder for inputs/targets; Tatabahasa encodes\n",
     "        # the per-token error-tag stream.\n",
     "        encoder = Encoder(sp)\n",
     "        t = Tatabahasa(d)\n",
     "        return {'inputs': encoder, 'targets': encoder, 'targets_error_tag': t}\n",
     "\n",
     "    def hparams(self, defaults, model_hparams):\n",
     "        super(Grammar, self).hparams(defaults, model_hparams)\n",
     "        # Register transformer_tag-specific hparams only when missing so\n",
     "        # hparam sets that already define them are left untouched.\n",
     "        if 'use_error_tags' not in model_hparams:\n",
     "            model_hparams.add_hparam('use_error_tags', True)\n",
     "        if 'middle_prediction' not in model_hparams:\n",
     "            model_hparams.add_hparam('middle_prediction', False)\n",
     "        if 'middle_prediction_layer_factor' not in model_hparams:\n",
     "            model_hparams.add_hparam('middle_prediction_layer_factor', 2)\n",
     "        if 'ffn_in_prediction_cascade' not in model_hparams:\n",
     "            model_hparams.add_hparam('ffn_in_prediction_cascade', 1)\n",
     "        if 'error_tag_embed_size' not in model_hparams:\n",
     "            model_hparams.add_hparam('error_tag_embed_size', 12)\n",
     "        if model_hparams.use_error_tags:\n",
     "            # Expose the tag stream as an extra SYMBOL modality with its\n",
     "            # own (small) vocabulary taken from the Tatabahasa encoder.\n",
     "            defaults.modality[\n",
     "                'targets_error_tag'\n",
     "            ] = modalities.ModalityType.SYMBOL\n",
     "            error_tag_vocab_size = self._encoders[\n",
     "                'targets_error_tag'\n",
     "            ].vocab_size\n",
     "            defaults.vocab_size['targets_error_tag'] = error_tag_vocab_size\n",
     "\n",
     "    def example_reading_spec(self):\n",
     "        # Extend the base spec with the variable-length tag sequence.\n",
     "        data_fields, _ = super(Grammar, self).example_reading_spec()\n",
     "        data_fields['targets_error_tag'] = tf.VarLenFeature(tf.int64)\n",
     "        return data_fields, None\n",
     "\n",
     "    @property\n",
     "    def approx_vocab_size(self):\n",
     "        return 32100\n",
     "\n",
     "    @property\n",
     "    def is_generate_per_split(self):\n",
     "        # Generate once; sharding into TRAIN/EVAL comes from dataset_splits.\n",
     "        return False\n",
     "\n",
     "    @property\n",
     "    def dataset_splits(self):\n",
     "        return [\n",
     "            {'split': problem.DatasetSplit.TRAIN, 'shards': 200},\n",
     "            {'split': problem.DatasetSplit.EVAL, 'shards': 1},\n",
     "        ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "DATA_DIR = os.path.expanduser('t2t-tatabahasa/data')\n",
    "TMP_DIR = os.path.expanduser('t2t-tatabahasa/tmp')\n",
    "TRAIN_DIR = os.path.expanduser('t2t-tatabahasa/train-base')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "PROBLEM = 'grammar'\n",
    "t2t_problem = problems.problem(PROBLEM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "MODEL = 'transformer_tag'\n",
    "HPARAMS = 'transformer_base'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tensor2tensor.utils.trainer_lib import create_run_config, create_experiment\n",
    "from tensor2tensor.utils.trainer_lib import create_hparams\n",
    "from tensor2tensor.utils import registry\n",
    "from tensor2tensor import models\n",
    "from tensor2tensor import problems\n",
    "from tensor2tensor.utils import trainer_lib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py:507: calling count_nonzero (from tensorflow.python.ops.math_ops) with axis is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "reduction_indices is deprecated, use axis instead\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py:507: calling count_nonzero (from tensorflow.python.ops.math_ops) with axis is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "reduction_indices is deprecated, use axis instead\n"
     ]
    }
   ],
   "source": [
     "X = tf.placeholder(tf.int32, [None, None], name = 'x_placeholder')\n",
     "Y = tf.placeholder(tf.int32, [None, None], name = 'y_placeholder')\n",
     "targets_error_tag = tf.placeholder(tf.int32, [None, None], 'error_placeholder')\n",
     "# Decode at most as many steps as the longest (non-zero-padded) input.\n",
     "X_seq_len = tf.count_nonzero(X, 1, dtype=tf.int32)\n",
     "maxlen_decode = tf.reduce_max(X_seq_len)\n",
     "\n",
     "# Expand to rank-4 [batch, length, 1, 1] -- the shape T2T symbol\n",
     "# modalities expect.\n",
     "x = tf.expand_dims(tf.expand_dims(X, -1), -1)\n",
     "y = tf.expand_dims(tf.expand_dims(Y, -1), -1)\n",
     "# NOTE(review): targets_error_tag_ (rank-4) is never used below; the\n",
     "# features dict passes the rank-2 placeholder instead -- confirm intended.\n",
     "targets_error_tag_ = tf.expand_dims(tf.expand_dims(targets_error_tag, -1), -1)\n",
     "\n",
     "features = {\n",
     "    \"inputs\": x,\n",
     "    \"targets\": y,\n",
     "    \"target_space_id\": tf.constant(1, dtype=tf.int32),\n",
     "    'targets_error_tag': targets_error_tag,\n",
     "}\n",
     "Modes = tf.estimator.ModeKeys\n",
     "hparams = trainer_lib.create_hparams(HPARAMS, data_dir=DATA_DIR, problem_name=PROBLEM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "hparams.filter_size = 3072\n",
    "hparams.hidden_size = 768\n",
    "hparams.num_heads = 12\n",
    "hparams.num_hidden_layers = 8\n",
    "hparams.vocab_divisor = 128\n",
    "hparams.dropout = 0.1\n",
    "hparams.max_length = 256\n",
    "\n",
    "# LM\n",
    "hparams.label_smoothing = 0.0\n",
    "hparams.shared_embedding_and_softmax_weights = False\n",
    "hparams.eval_drop_long_sequences = True\n",
    "hparams.max_length = 256\n",
    "hparams.multiproblem_mixing_schedule = 'pretrain'\n",
    "\n",
    "# tpu\n",
    "hparams.symbol_modality_num_shards = 1\n",
    "hparams.attention_dropout_broadcast_dims = '0,1'\n",
    "hparams.relu_dropout_broadcast_dims = '1'\n",
    "hparams.layer_prepostprocess_dropout_broadcast_dims = '1'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting T2TModel mode to 'infer'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting T2TModel mode to 'infer'\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.dropout to 0.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.dropout to 0.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.label_smoothing to 0.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.label_smoothing to 0.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.layer_prepostprocess_dropout to 0.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.layer_prepostprocess_dropout to 0.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.symbol_dropout to 0.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.symbol_dropout to 0.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.attention_dropout to 0.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.attention_dropout to 0.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.relu_dropout to 0.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Setting hparams.relu_dropout to 0.0\n"
     ]
    }
   ],
   "source": [
    "model = registry.model(MODEL)(hparams, Modes.PREDICT)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# logits = model(features)\n",
    "# logits\n",
    "\n",
    "# sess = tf.InteractiveSession()\n",
    "# sess.run(tf.global_variables_initializer())\n",
    "# l = sess.run(logits, feed_dict = {X: [[10,10, 10, 10,10,1],[10,10, 10, 10,10,1]],\n",
    "#                              Y: [[10,10, 10, 10,10,1],[10,10, 10, 10,10,1]],\n",
    "#                              targets_error_tag: [[10,10, 10, 10,10,1],\n",
    "#                                                 [10,10, 10, 10,10,1]]})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/layers/common_attention.py:931: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.cast` instead.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/layers/common_attention.py:931: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.cast` instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/models/transformer.py:96: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/models/transformer.py:96: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/utils/expert_utils.py:621: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/utils/expert_utils.py:621: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/utils/expert_utils.py:621: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.cast` instead.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensor2tensor-1.15.7-py3.6.egg/tensor2tensor/utils/expert_utils.py:621: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.cast` instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/b2b/transformer_tag.py:1164: to_int64 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.cast` instead.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/b2b/transformer_tag.py:1164: to_int64 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.cast` instead.\n"
     ]
    }
   ],
   "source": [
     "# Inference features: inputs only; targets are produced by decoding.\n",
     "features = {\n",
     "    \"inputs\": x,\n",
     "    \"target_space_id\": tf.constant(1, dtype=tf.int32),\n",
     "}\n",
     "\n",
     "# Greedy decoding, capped at the input length (maxlen_decode).\n",
     "with tf.variable_scope(tf.get_variable_scope(), reuse = False):\n",
     "    fast_result = model._greedy_infer(features, maxlen_decode)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "result_seq = tf.identity(fast_result['outputs'], name = 'greedy')\n",
    "result_tag = tf.identity(fast_result['outputs_tag'], name = 'tag_greedy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
     "from tensor2tensor.layers import common_layers\n",
     "\n",
     "def accuracy_per_sequence(predictions, targets, weights_fn = common_layers.weights_nonzero):\n",
     "    # Fraction of sequences that are entirely correct (exact match over\n",
     "    # all non-padding positions). NOTE(review): defined but unused below.\n",
     "    padded_predictions, padded_labels = common_layers.pad_with_zeros(predictions, targets)\n",
     "    weights = weights_fn(padded_labels)\n",
     "    padded_labels = tf.to_int32(padded_labels)\n",
     "    padded_predictions = tf.to_int32(padded_predictions)\n",
     "    not_correct = tf.to_float(tf.not_equal(padded_predictions, padded_labels)) * weights\n",
     "    axis = list(range(1, len(padded_predictions.get_shape())))\n",
     "    # A sequence counts as correct iff it has zero weighted mismatches.\n",
     "    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))\n",
     "    return tf.reduce_mean(correct_seq)\n",
     "\n",
     "def padded_accuracy(predictions, targets, weights_fn = common_layers.weights_nonzero):\n",
     "    # Token-level accuracy over non-padding positions (padding is\n",
     "    # zero-weighted by weights_nonzero).\n",
     "    padded_predictions, padded_labels = common_layers.pad_with_zeros(predictions, targets)\n",
     "    weights = weights_fn(padded_labels)\n",
     "    padded_labels = tf.to_int32(padded_labels)\n",
     "    padded_predictions = tf.to_int32(padded_predictions)\n",
     "    n = tf.to_float(tf.equal(padded_predictions, padded_labels)) * weights\n",
     "    d = tf.reduce_sum(weights)\n",
     "    return tf.reduce_sum(n) / d"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Token-level accuracy for the corrected sequence and the tag stream.\n",
     "acc_seq = padded_accuracy(result_seq, Y)\n",
     "acc_tag = padded_accuracy(result_tag, targets_error_tag)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'t2t-tatabahasa/train-base/model.ckpt-140000'"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ckpt_path = tf.train.latest_checkpoint(os.path.join(TRAIN_DIR))\n",
    "ckpt_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "sess = tf.InteractiveSession()\n",
    "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from t2t-tatabahasa/train-base/model.ckpt-140000\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from t2t-tatabahasa/train-base/model.ckpt-140000\n"
     ]
    }
   ],
   "source": [
     "# Restore trainable variables only; optimizer slots/global_step in the\n",
     "# checkpoint are not needed for inference.\n",
     "var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n",
     "saver = tf.train.Saver(var_list = var_lists)\n",
     "saver.restore(sess, ckpt_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle\n",
    "\n",
    "with open('../pure-text/dataset-tatabahasa.pkl', 'rb') as fopen:\n",
    "    data = pickle.load(fopen)\n",
    "\n",
    "encoder = Encoder(sp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
     "def get_xy(row, encoder):\n",
     "    # Build one training triple from a row of aligned spans:\n",
     "    #   x   = subword ids of the corrupted text (row[1] spans),\n",
     "    #   y   = subword ids of the correct text (row[0] spans),\n",
     "    #   tag = error class of each x span, repeated per subword so the\n",
     "    #         tag sequence aligns with x token-for-token.\n",
     "    x, y, tag = [], [], []\n",
     "\n",
     "    for i in range(len(row[0])):\n",
     "        t = encoder.encode(row[0][i][0])\n",
     "        y.extend(t)\n",
     "        t = encoder.encode(row[1][i][0])\n",
     "        x.extend(t)\n",
     "        tag.extend([row[1][i][1]] * len(t))\n",
     "\n",
     "    # EOS\n",
     "    # id 1 is EOS for the subword streams; tag 0 is the PAD class.\n",
     "    x.append(1)\n",
     "    y.append(1)\n",
     "    tag.append(0)\n",
     "\n",
     "    return x, y, tag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "x, y, tag = get_xy(data[10], encoder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "e = encoder.encode('Pilih mana jurusan yang sesuai dengan kebolehan anda dalam peperiksaan Sijil Pelajaran Malaysia semasa memohon kemasukan ke institusi pengajian tinggi.') + [1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "r = sess.run(fast_result, \n",
    "         feed_dict = {X: [e]})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ 2,  4,  4,  4,  2,  2,  2,  2,  2, 11,  2,  2,  2,  2,  2,  2,\n",
       "         2,  2,  2,  2,  2,  2,  2,  0]])"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "r['outputs_tag']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Pilih mana-mana jurusan yang sesuai dengan kebolehan beliau dalam peperiksaan Sijil Pelajaran Malaysia semasa memohon kemasukan ke institusi pengajian tinggi.'"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "encoder.decode(r['outputs'][0].tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Marta Vieira da Silva ( lahir 19 Februari 1986 ) yang biasanya dikenali sebagai Marta merupakan seorang bola pemain sepak Brazil yang main laksanabagai penyerang posisi hingga kelab Liga Bola Sepak Wanita Nasional , Orlando Pride sungguhpun juga pasukan sepak kebangsaan bola wanita Brazil .'"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "encoder.decode(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Marta Vieira da Silva ( lahir 19 Februari 1986 ) yang biasanya dikenali sebagai Marta merupakan seorang pemain bola sepak Brazil yang bermain dalam posisi penyerang untuk kelab Liga Bola Sepak Wanita Nasional , Orlando Pride dan juga pasukan bola sepak kebangsaan wanita Brazil .'"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "encoder.decode(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'targets': VarLenFeature(dtype=tf.int64),\n",
       " 'inputs': VarLenFeature(dtype=tf.int64),\n",
       " 'targets_error_tag': VarLenFeature(dtype=tf.int64)}"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "hparams.problem.example_reading_spec()[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
     "def parse(serialized_example):\n",
     "    # Parse one serialized tf.Example. VarLenFeature fields come back\n",
     "    # as SparseTensors, so `.values` extracts the flat id sequence.\n",
     "\n",
     "    data_fields = hparams.problem.example_reading_spec()[0]\n",
     "    features = tf.parse_single_example(\n",
     "        serialized_example, features = data_fields\n",
     "    )\n",
     "    for k in features.keys():\n",
     "        features[k] = features[k].values\n",
     "\n",
     "    return features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/autograph/converters/directives.py:119: The name tf.parse_single_example is deprecated. Please use tf.io.parse_single_example instead.\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/autograph/converters/directives.py:119: The name tf.parse_single_example is deprecated. Please use tf.io.parse_single_example instead.\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-33-50d600d7a4c1>:14: DatasetV1.make_one_shot_iterator (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `for ... in dataset:` to iterate over a dataset. If using `tf.estimator`, return the `Dataset` object directly from your input function. As a last resort, you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)`.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-33-50d600d7a4c1>:14: DatasetV1.make_one_shot_iterator (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `for ... in dataset:` to iterate over a dataset. If using `tf.estimator`, return the `Dataset` object directly from your input function. As a last resort, you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)`.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'inputs': <tf.Tensor 'IteratorGetNext:0' shape=(?, ?) dtype=int64>,\n",
       " 'targets': <tf.Tensor 'IteratorGetNext:1' shape=(?, ?) dtype=int64>,\n",
       " 'targets_error_tag': <tf.Tensor 'IteratorGetNext:2' shape=(?, ?) dtype=int64>}"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "dataset = tf.data.TFRecordDataset('t2t-tatabahasa/data/grammar-dev-00000-of-00001')\n",
     "dataset = dataset.map(parse, num_parallel_calls=32)\n",
     "# Pad every field with 0 (the PAD id) to the longest example in each\n",
     "# batch of 32.\n",
     "dataset = dataset.padded_batch(32, \n",
     "    padded_shapes = {\n",
     "    'inputs': tf.TensorShape([None]),\n",
     "    'targets': tf.TensorShape([None]),\n",
     "    'targets_error_tag': tf.TensorShape([None])\n",
     "    },\n",
     "    padding_values = {\n",
     "        'inputs': tf.constant(0, dtype = tf.int64),\n",
     "        'targets': tf.constant(0, dtype = tf.int64),\n",
     "        'targets_error_tag': tf.constant(0, dtype = tf.int64),\n",
     "    })\n",
     "dataset = dataset.make_one_shot_iterator().get_next()\n",
     "dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "done 0\n",
      "done 1\n",
      "done 2\n",
      "done 3\n",
      "done 4\n",
      "done 5\n",
      "done 6\n",
      "done 7\n",
      "done 8\n",
      "done 9\n",
      "done 10\n",
      "done 11\n",
      "done 12\n",
      "done 13\n",
      "done 14\n",
      "done 15\n",
      "done 16\n",
      "done 17\n",
      "done 18\n",
      "done 19\n",
      "done 20\n",
      "done 21\n",
      "done 22\n",
      "done 23\n",
      "done 24\n",
      "done 25\n",
      "done 26\n",
      "done 27\n",
      "done 28\n",
      "done 29\n",
      "done 30\n",
      "done 31\n",
      "done 32\n",
      "done 33\n",
      "done 34\n",
      "done 35\n",
      "done 36\n",
      "done 37\n",
      "done 38\n",
      "done 39\n",
      "done 40\n",
      "done 41\n",
      "done 42\n",
      "done 43\n",
      "done 44\n",
      "done 45\n",
      "done 46\n",
      "done 47\n",
      "done 48\n",
      "done 49\n",
      "done 50\n",
      "done 51\n",
      "done 52\n",
      "done 53\n",
      "done 54\n",
      "done 55\n",
      "done 56\n",
      "done 57\n",
      "done 58\n",
      "done 59\n",
      "done 60\n",
      "done 61\n",
      "done 62\n",
      "done 63\n",
      "done 64\n",
      "done 65\n",
      "done 66\n",
      "done 67\n",
      "done 68\n",
      "done 69\n",
      "done 70\n",
      "done 71\n",
      "done 72\n",
      "done 73\n",
      "done 74\n",
      "done 75\n",
      "done 76\n",
      "done 77\n",
      "done 78\n",
      "done 79\n",
      "done 80\n",
      "done 81\n",
      "done 82\n",
      "done 83\n",
      "done 84\n",
      "done 85\n",
      "done 86\n",
      "done 87\n",
      "done 88\n",
      "done 89\n",
      "done 90\n",
      "done 91\n",
      "done 92\n",
      "done 93\n",
      "done 94\n",
      "done 95\n",
      "done 96\n",
      "done 97\n",
      "done 98\n",
      "done 99\n",
      "done 100\n",
      "done 101\n",
      "done 102\n",
      "done 103\n",
      "done 104\n",
      "done 105\n",
      "done 106\n",
      "done 107\n",
      "done 108\n",
      "done 109\n",
      "done 110\n",
      "done 111\n",
      "done 112\n",
      "done 113\n",
      "done 114\n",
      "done 115\n",
      "done 116\n",
      "done 117\n",
      "done 118\n",
      "done 119\n",
      "done 120\n",
      "done 121\n",
      "done 122\n",
      "done 123\n",
      "done 124\n",
      "done 125\n",
      "done 126\n",
      "done 127\n",
      "done 128\n",
      "done 129\n",
      "done 130\n",
      "done 131\n",
      "done 132\n",
      "done 133\n",
      "done 134\n",
      "done 135\n",
      "done 136\n",
      "done 137\n",
      "done 138\n",
      "done 139\n",
      "done 140\n",
      "done 141\n",
      "done 142\n",
      "done 143\n",
      "done 144\n",
      "done 145\n",
      "done 146\n",
      "done 147\n",
      "done 148\n",
      "done 149\n",
      "done 150\n",
      "done 151\n",
      "done 152\n",
      "done 153\n",
      "done 154\n",
      "done 155\n",
      "done 156\n",
      "done 157\n",
      "done 158\n",
      "done 159\n",
      "done 160\n",
      "done 161\n",
      "done 162\n",
      "done 163\n",
      "done 164\n",
      "done 165\n",
      "done 166\n",
      "done 167\n",
      "done 168\n",
      "done 169\n",
      "done 170\n",
      "done 171\n",
      "done 172\n",
      "done 173\n",
      "done 174\n",
      "done 175\n",
      "done 176\n",
      "done 177\n",
      "done 178\n",
      "done 179\n",
      "done 180\n",
      "done 181\n",
      "done 182\n",
      "done 183\n",
      "done 184\n",
      "done 185\n",
      "done 186\n",
      "done 187\n",
      "done 188\n",
      "done 189\n",
      "done 190\n",
      "done 191\n",
      "done 192\n",
      "done 193\n",
      "done 194\n",
      "done 195\n",
      "done 196\n",
      "done 197\n",
      "done 198\n",
      "done 199\n",
      "done 200\n",
      "done 201\n",
      "done 202\n",
      "done 203\n",
      "done 204\n",
      "done 205\n",
      "done 206\n",
      "done 207\n",
      "done 208\n",
      "done 209\n",
      "done 210\n",
      "done 211\n",
      "done 212\n",
      "done 213\n",
      "done 214\n",
      "done 215\n",
      "done 216\n",
      "done 217\n",
      "done 218\n",
      "done 219\n",
      "done 220\n",
      "done 221\n",
      "done 222\n",
      "done 223\n",
      "done 224\n",
      "done 225\n",
      "done 226\n",
      "done 227\n",
      "done 228\n",
      "done 229\n",
      "done 230\n",
      "done 231\n",
      "done 232\n",
      "done 233\n",
      "done 234\n",
      "done 235\n",
      "done 236\n",
      "done 237\n",
      "done 238\n",
      "done 239\n",
      "done 240\n",
      "done 241\n",
      "done 242\n",
      "done 243\n",
      "done 244\n",
      "done 245\n",
      "done 246\n",
      "done 247\n",
      "done 248\n",
      "done 249\n",
      "done 250\n",
      "done 251\n",
      "done 252\n",
      "done 253\n",
      "done 254\n",
      "done 255\n",
      "done 256\n",
      "done 257\n",
      "done 258\n",
      "done 259\n",
      "done 260\n",
      "done 261\n",
      "done 262\n",
      "done 263\n",
      "done 264\n",
      "done 265\n",
      "done 266\n",
      "done 267\n",
      "done 268\n",
      "done 269\n",
      "done 270\n",
      "done 271\n",
      "done 272\n",
      "done 273\n",
      "done 274\n",
      "done 275\n",
      "done 276\n",
      "done 277\n",
      "done 278\n",
      "done 279\n",
      "done 280\n",
      "done 281\n",
      "done 282\n",
      "done 283\n",
      "done 284\n",
      "done 285\n",
      "done 286\n",
      "done 287\n",
      "done 288\n",
      "done 289\n",
      "done 290\n",
      "done 291\n",
      "done 292\n",
      "done 293\n",
      "done 294\n",
      "done 295\n",
      "done 296\n",
      "done 297\n",
      "done 298\n",
      "done 299\n",
      "done 300\n",
      "done 301\n",
      "done 302\n",
      "done 303\n",
      "done 304\n",
      "done 305\n",
      "done 306\n",
      "done 307\n",
      "done 308\n",
      "done 309\n",
      "done 310\n",
      "done 311\n",
      "done 312\n",
      "done 313\n",
      "done 314\n",
      "done 315\n",
      "done 316\n"
     ]
    }
   ],
   "source": [
    "# Run the whole dev set through the graph, collecting per-batch\n",
    "# sequence-accuracy and tag-accuracy values.\n",
    "seqs, tags = [], []\n",
    "index = 0\n",
    "while True:\n",
    "    try:\n",
    "        d = sess.run(dataset)\n",
    "    except tf.errors.OutOfRangeError:\n",
    "        # Iterator exhausted: stop cleanly. Catching only this error\n",
    "        # (instead of a bare `except:`) lets real failures surface.\n",
    "        break\n",
    "    s, t = sess.run([acc_seq, acc_tag], feed_dict = {X: d['inputs'], \n",
    "                                          Y: d['targets'], \n",
    "                                          targets_error_tag: d['targets_error_tag']})\n",
    "    seqs.append(s)\n",
    "    tags.append(t)\n",
    "    print(f'done {index}')\n",
    "    index += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0.93897253, 0.977407)"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Mean per-batch sequence accuracy and tag accuracy over the dev set.\n",
    "np.mean(seqs), np.mean(tags)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'transformertag-base/model.ckpt'"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Save only the trainable variables (no optimizer state) to a fresh checkpoint.\n",
    "saver = tf.train.Saver(tf.trainable_variables())\n",
    "saver.save(sess, 'transformertag-base/model.ckpt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['x_placeholder',\n",
       " 'transformer_tag/body/target_space_embedding/kernel',\n",
       " 'transformer_tag/body/encoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_0/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_0/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_0/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_0/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_0/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_0/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_0/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_0/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_0/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_0/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_1/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_1/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_1/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_1/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_1/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_1/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_1/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_1/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_1/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_1/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_2/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_2/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_2/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_2/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_2/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_2/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_2/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_2/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_2/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_2/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_3/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_3/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_3/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_3/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_3/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_3/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_3/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_3/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_3/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_3/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_3/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_3/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_4/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_4/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_4/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_4/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_4/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_4/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_4/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_4/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_4/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_4/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_4/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_4/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_5/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_5/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_5/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_5/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_5/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_5/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_5/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_5/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_5/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_5/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_5/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_5/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_6/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_6/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_6/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_6/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_6/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_6/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_6/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_6/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_6/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_6/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_6/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_6/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_7/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_7/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_7/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/encoder/layer_7/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/encoder/layer_7/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/encoder/layer_7/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/encoder/layer_7/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_7/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/encoder/layer_7/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/encoder/layer_7/ffn/conv1/bias',\n",
       " 'transformer_tag/body/encoder/layer_7/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/encoder/layer_7/ffn/conv2/bias',\n",
       " 'transformer_tag/body/encoder/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/encoder/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_0/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/encdec_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/encdec_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/layer_prepostprocess_1/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/layer_prepostprocess_1/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/conv1/bias',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/edit_ops_layer/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_0/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_0/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_0/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_0/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_0/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_0/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_0/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_1/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_1/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_1/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_1/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_1/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_1/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_1/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_2/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_2/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_2/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_2/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_2/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_2/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_2/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_3/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_3/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_3/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_3/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_3/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_3/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_3/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_3/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_3/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_4/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_4/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_4/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_4/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_4/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_4/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_4/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_4/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_4/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_5/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_5/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_5/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_5/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_5/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_5/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_5/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_5/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_5/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_6/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_6/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_6/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_6/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_6/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_6/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_6/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_6/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_6/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_7/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_7/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_7/self_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/self_attention/multihead_attention/k/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/self_attention/multihead_attention/v/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/self_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_7/encdec_attention/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_7/encdec_attention/multihead_attention/q/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/encdec_attention/multihead_attention/output_transform/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/ffn/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_7/ffn/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/decoder/layer_7/ffn/conv1/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/ffn/conv1/bias',\n",
       " 'transformer_tag/body/decoder/layer_7/ffn/conv2/kernel',\n",
       " 'transformer_tag/body/decoder/layer_7/ffn/conv2/bias',\n",
       " 'transformer_tag/body/decoder/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/decoder/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/error_tag_prediction/projection/bottleneck/kernel',\n",
       " 'transformer_tag/body/error_tag_prediction/projection/bottleneck/bias',\n",
       " 'transformer_tag/body/error_tag_prediction/projection/logits/kernel',\n",
       " 'transformer_tag/body/error_tag_prediction/embedding/embedding/kernel',\n",
       " 'transformer_tag/body/error_tag_prediction/embedding/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/error_tag_prediction/embedding/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/post_error_tag/layer_0/layer_prepostprocess/layer_norm/layer_norm_scale',\n",
       " 'transformer_tag/body/post_error_tag/layer_0/layer_prepostprocess/layer_norm/layer_norm_bias',\n",
       " 'transformer_tag/body/post_error_tag/layer_0/conv1/kernel',\n",
       " 'transformer_tag/body/post_error_tag/layer_0/conv1/bias',\n",
       " 'transformer_tag/body/post_error_tag/layer_0/conv2/kernel',\n",
       " 'transformer_tag/body/post_error_tag/layer_0/conv2/bias',\n",
       " 'greedy',\n",
       " 'tag_greedy']"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "strings = ','.join(\n",
    "    [\n",
    "        n.name\n",
    "        for n in tf.get_default_graph().as_graph_def().node\n",
    "        if ('Variable' in n.op\n",
    "        or 'Placeholder' in n.name\n",
    "        or 'greedy' in n.name\n",
    "        or 'tag_greedy' in n.name\n",
    "        or 'x_placeholder' in n.name\n",
    "        or 'self/Softmax' in n.name)\n",
    "        and 'adam' not in n.name\n",
    "        and 'beta' not in n.name\n",
    "        and 'global_step' not in n.name\n",
    "        and 'modality' not in n.name\n",
    "        and 'Assign' not in n.name\n",
    "    ]\n",
    ")\n",
    "strings.split(',')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "def freeze_graph(model_dir, output_node_names):\n",
    "\n",
    "    if not tf.gfile.Exists(model_dir):\n",
    "        raise AssertionError(\n",
    "            \"Export directory doesn't exists. Please specify an export \"\n",
    "            'directory: %s' % model_dir\n",
    "        )\n",
    "\n",
    "    checkpoint = tf.train.get_checkpoint_state(model_dir)\n",
    "    input_checkpoint = checkpoint.model_checkpoint_path\n",
    "\n",
    "    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])\n",
    "    output_graph = absolute_model_dir + '/frozen_model.pb'\n",
    "    clear_devices = True\n",
    "    with tf.Session(graph = tf.Graph()) as sess:\n",
    "        saver = tf.train.import_meta_graph(\n",
    "            input_checkpoint + '.meta', clear_devices = clear_devices\n",
    "        )\n",
    "        saver.restore(sess, input_checkpoint)\n",
    "        output_graph_def = tf.graph_util.convert_variables_to_constants(\n",
    "            sess,\n",
    "            tf.get_default_graph().as_graph_def(),\n",
    "            output_node_names.split(','),\n",
    "        )\n",
    "        with tf.gfile.GFile(output_graph, 'wb') as f:\n",
    "            f.write(output_graph_def.SerializeToString())\n",
    "        print('%d ops in the final graph.' % len(output_graph_def.node))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from transformertag-base/model.ckpt\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from transformertag-base/model.ckpt\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-38-9a7215a4e58a>:23: convert_variables_to_constants (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.convert_variables_to_constants`\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-38-9a7215a4e58a>:23: convert_variables_to_constants (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.convert_variables_to_constants`\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/framework/graph_util_impl.py:277: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.extract_sub_graph`\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/framework/graph_util_impl.py:277: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.compat.v1.graph_util.extract_sub_graph`\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Froze 269 variables.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Froze 269 variables.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Converted 269 variables to const ops.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Converted 269 variables to const ops.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "7435 ops in the final graph.\n"
     ]
    }
   ],
   "source": [
    "freeze_graph('transformertag-base', strings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_graph(frozen_graph_filename):\n",
    "    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:\n",
    "        graph_def = tf.GraphDef()\n",
    "        graph_def.ParseFromString(f.read())\n",
    "    with tf.Graph().as_default() as graph:\n",
    "        tf.import_graph_def(graph_def)\n",
    "    return graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/client/session.py:1750: UserWarning: An interactive session is already active. This can cause out-of-memory errors in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s).\n",
      "  warnings.warn('An interactive session is already active. This can '\n"
     ]
    }
   ],
   "source": [
     "# Load the frozen graph and look up the input placeholder and the two\n",
     "# decode outputs by name (the 'import/' prefix comes from tf.import_graph_def).\n",
     "g = load_graph('transformertag-base/frozen_model.pb')\n",
     "x = g.get_tensor_by_name('import/x_placeholder:0')\n",
     "greedy = g.get_tensor_by_name('import/greedy:0')\n",
     "tag_greedy = g.get_tensor_by_name('import/tag_greedy:0')\n",
     "test_sess = tf.InteractiveSession(graph = g)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[array([[ 4881, 27510,   158, ...,     1,     1,     1],\n",
       "        [  720,    17,   130, ...,     1,     1,     1],\n",
       "        [27130,     7, 29076, ...,     1,     1,     1],\n",
       "        ...,\n",
       "        [16256,  5222,    36, ...,     1,    16,     1],\n",
       "        [ 1151,   787,    27, ...,     1,     1,     1],\n",
       "        [  104,    89,  3502, ...,     1,     1,     3]]),\n",
       " array([[2, 3, 3, ..., 0, 0, 0],\n",
       "        [2, 2, 2, ..., 0, 0, 0],\n",
       "        [2, 2, 2, ..., 0, 0, 0],\n",
       "        ...,\n",
       "        [2, 2, 2, ..., 0, 2, 0],\n",
       "        [2, 2, 2, ..., 0, 0, 0],\n",
       "        [2, 2, 2, ..., 0, 0, 2]])]"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sanity-check inference on the frozen graph. NOTE(review): `d` is a batch\n",
     "# built earlier in the notebook (presumably tokenised inputs under the\n",
     "# 'inputs' key) — this cell depends on that earlier state.\n",
     "test_sess.run([greedy, tag_greedy], feed_dict = {x:d['inputs']})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
     "import tensorflow as tf\n",
     "from tensorflow.tools.graph_transforms import TransformGraph\n",
     "from glob import glob\n",
     "# Fix the graph-level seed so any stochastic ops are reproducible.\n",
     "tf.set_random_seed(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow_text\n",
    "import tf_sentencepiece"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-45-4ca23320d2af>:12: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.gfile.GFile.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-45-4ca23320d2af>:12: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.gfile.GFile.\n"
     ]
    }
   ],
   "source": [
    "transforms = ['add_default_attributes',\n",
    "             'remove_nodes(op=Identity, op=CheckNumerics, op=Dropout)',\n",
    "             'fold_constants(ignore_errors=true)',\n",
    "             'fold_batch_norms',\n",
    "             'fold_old_batch_norms',\n",
    "             'quantize_weights(fallback_min=-10, fallback_max=10)',\n",
    "             'strip_unused_nodes',\n",
    "             'sort_by_execution_order']\n",
    "\n",
    "pb = 'transformertag-base/frozen_model.pb'\n",
    "input_graph_def = tf.GraphDef()\n",
    "with tf.gfile.FastGFile(pb, 'rb') as f:\n",
    "    input_graph_def.ParseFromString(f.read())\n",
    "    \n",
    "transformed_graph_def = TransformGraph(input_graph_def, \n",
    "                                       ['x_placeholder'],\n",
    "                                       ['greedy', 'tag_greedy'], transforms)\n",
    "\n",
    "with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:\n",
    "    f.write(transformed_graph_def.SerializeToString())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Reload the quantized graph so the run below can be compared against the\n",
     "# unquantized results above.\n",
     "g = load_graph('transformertag-base/frozen_model.pb.quantized')\n",
     "x = g.get_tensor_by_name('import/x_placeholder:0')\n",
     "greedy = g.get_tensor_by_name('import/greedy:0')\n",
     "tag_greedy = g.get_tensor_by_name('import/tag_greedy:0')\n",
     "test_sess = tf.InteractiveSession(graph = g)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[array([[ 4881, 27510,   158, ...,     1,     1,     1],\n",
       "        [  720,    17,   130, ...,     1,     1,     1],\n",
       "        [27130,     7, 29076, ...,     1,     1,     1],\n",
       "        ...,\n",
       "        [16256,  5222,    36, ...,    16,     1,     1],\n",
       "        [ 1151,   787,    27, ...,    15,     3,     1],\n",
       "        [  104,    89,  3502, ...,     1,     1,     1]]),\n",
       " array([[2, 3, 3, ..., 0, 0, 0],\n",
       "        [2, 2, 2, ..., 0, 0, 0],\n",
       "        [2, 2, 2, ..., 0, 0, 0],\n",
       "        ...,\n",
       "        [2, 2, 2, ..., 2, 0, 0],\n",
       "        [2, 2, 2, ..., 2, 2, 0],\n",
       "        [2, 2, 2, ..., 0, 0, 0]])]"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Same inference on the quantized graph; small differences from the\n",
     "# unquantized run above are expected after 8-bit weight quantization.\n",
     "# NOTE(review): depends on `d` defined earlier in the notebook.\n",
     "test_sess.run([greedy, tag_greedy], feed_dict = {x:d['inputs']})"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
