{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# default_exp embedding_layer.base\n",
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "import os\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n",
    "from nbdev.showdoc import show_doc"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Embedding Layer\n",
    "\n",
    "NOTE: This is not a public API."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# export\n",
    "import json\n",
    "from typing import Dict\n",
    "from collections import namedtuple\n",
    "\n",
    "import tensorflow as tf\n",
    "from loguru import logger\n",
    "from m3tl.base_params import BaseParams\n",
    "from m3tl.utils import get_shape_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# export\n",
    "\n",
    "\n",
    "class DefaultMultimodalEmbedding(tf.keras.Model):\n",
    "    \"\"\"Embedding layer that fuses multiple input modalities into one sequence.\n",
    "\n",
    "    For every modal registered in the params' problem info this layer embeds\n",
    "    the modal's inputs into the shared hidden dimension:\n",
    "\n",
    "    - text: token ids are looked up in `embedding_layer`\n",
    "    - array: dense features are projected with a per-modal Dense layer\n",
    "    - category: ids are looked up in a per-modal trainable Embedding\n",
    "\n",
    "    Each modal sequence is terminated with a learned separator embedding and\n",
    "    the per-modal sequences are concatenated along the sequence axis. If\n",
    "    params.enable_modal_type is set, a modal type embedding is added on top.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, params: BaseParams, embedding_layer: tf.keras.layers.Embedding = None):\n",
    "        super(DefaultMultimodalEmbedding, self).__init__()\n",
    "        self.params = params\n",
    "        self.embedding_layer = embedding_layer\n",
    "\n",
    "        # infer hidden size from the word embedding table: weights[0] is [vocab, dim]\n",
    "        self.embedding_dim = tf.shape(self.embedding_layer.weights[0])[1]\n",
    "        if hasattr(self.embedding_dim, 'numpy'):\n",
    "            # eager mode: convert to a plain int so it can be passed to layer constructors\n",
    "            self.embedding_dim = self.embedding_dim.numpy()\n",
    "\n",
    "        # create modal type dict: flatten per-problem info dicts into one dict\n",
    "        all_problem_info = self.params.get_problem_info()\n",
    "        info_dict = {}\n",
    "        for problem_info in all_problem_info.values():\n",
    "            info_dict.update(problem_info)\n",
    "        # create modal_name: modal_type dict\n",
    "        self.modal_dict = {k.replace(\n",
    "            '_modal_type', ''): v for k, v in info_dict.items() if '_modal_type' in k}\n",
    "        # put text in front in order to be compatible with old version\n",
    "        modal_tuple_list_for_sort = [\n",
    "            (modal_name, modal_type, 0) if modal_type == 'text' else (\n",
    "                modal_name, modal_type, 1)\n",
    "            for modal_name, modal_type in self.modal_dict.items()\n",
    "        ]\n",
    "        self.ordered_modal_tuple_list = [\n",
    "            (modal_name, modal_type) for modal_name, modal_type, _ in\n",
    "            sorted(modal_tuple_list_for_sort, key=lambda x: x[-1])\n",
    "        ]\n",
    "        if not self.ordered_modal_tuple_list:\n",
    "            raise ValueError(\n",
    "                \"Modal list is empty while creating embedding layer. It's\"\n",
    "                \" most likely because you built the model before dataset is\"\n",
    "                \" created or the current model path is different from the one when \"\n",
    "                \"creating TFRecord. Since the number of modals is not certain before \"\n",
    "                \"we see any data, an error is raised here. To resolve this, \"\n",
    "                \"please call m3tl.input_fn.train_eval_input_fn(params) before \"\n",
    "                \"the model is built or copy files from previous model path.\")\n",
    "        # create modal type ids (sorted keys for a deterministic mapping)\n",
    "        self.modal_type_id = {k: i for i, k in enumerate(\n",
    "            sorted(self.modal_dict.keys()))}\n",
    "        logger.critical('Modal Type id mapping: \\n {}'.format(\n",
    "            json.dumps(self.modal_type_id, indent=4)))\n",
    "\n",
    "        # create embedding layer for categorical modal\n",
    "        self.cate_embedding = {}\n",
    "        for modal_name, modal_type in self.modal_dict.items():\n",
    "            if modal_type == 'category':\n",
    "                modal_info_name = '{}_modal_info'.format(modal_name)\n",
    "                if modal_info_name not in info_dict:\n",
    "                    raise ValueError(\n",
    "                        'category modal {} does not have modal '\n",
    "                        'info, expect key: {}, receive keys: {}'.format(\n",
    "                            modal_name, modal_info_name, info_dict.keys()))\n",
    "                self.cate_embedding[modal_name] = tf.keras.layers.Embedding(\n",
    "                    input_dim=info_dict[modal_info_name], output_dim=self.embedding_dim)\n",
    "\n",
    "        # create dense layer for converting dimension for array modal\n",
    "        self.multimodal_dense = {modal_name: tf.keras.layers.Dense(\n",
    "            self.embedding_dim) for modal_name, modal_type in self.modal_dict.items()\n",
    "            if modal_type == 'array'}\n",
    "        # multimodal modal type embedding\n",
    "        # this might raise no gradients warning if it's unimodal\n",
    "        # variable: [num_modal_types + 1, embedding_dim]\n",
    "        if self.params.enable_modal_type:\n",
    "            self.modal_type_embedding = tf.keras.layers.Embedding(input_dim=len(\n",
    "                self.modal_dict)+1, output_dim=self.embedding_dim)\n",
    "\n",
    "        self.enable_modal_type = self.params.enable_modal_type\n",
    "\n",
    "        # add modal sep weight: learned separator appended after every modal\n",
    "        self.sep_embedding = self.add_weight(name='modal_sep_embedding', shape=(\n",
    "            1, 1, self.embedding_dim), dtype=tf.float32)\n",
    "\n",
    "        self.dropout = tf.keras.layers.Dropout(self.params.dropout)\n",
    "\n",
    "    @tf.function\n",
    "    def call(self, inputs, training: bool = True):\n",
    "        \"\"\"Embed every modal and concatenate along the sequence axis.\n",
    "\n",
    "        Args:\n",
    "            inputs: dict of feature tensors keyed by '{modal}_input_ids',\n",
    "                '{modal}_mask' and '{modal}_segment_ids' for each modal.\n",
    "            training: whether dropout should be active.\n",
    "\n",
    "        Returns:\n",
    "            (inputs, hidden_feature) where hidden_feature is a namedtuple with\n",
    "            fields word_embedding, res_input_mask and res_segment_ids.\n",
    "        \"\"\"\n",
    "        features_dict = inputs\n",
    "        # placeholders so autograph knows the loop variables' dtypes/ranks;\n",
    "        # they are overwritten on the first loop iteration\n",
    "        res_modal_input = tf.zeros(shape=(1, 1, 1))\n",
    "        res_segment_ids = tf.zeros(shape=(1, 1))\n",
    "        res_input_mask = tf.zeros(shape=(1, 1))\n",
    "        modal_type_ids = tf.zeros(shape=(1, 1))\n",
    "        for modal_idx, (modal_name, modal_type) in enumerate(self.ordered_modal_tuple_list):\n",
    "            # sequence length grows as modals are concatenated, so relax shapes\n",
    "            tf.autograph.experimental.set_loop_options(\n",
    "                shape_invariants=[(res_modal_input, tf.TensorShape([None, None, None])),\n",
    "                                  (res_segment_ids,\n",
    "                                   tf.TensorShape([None, None])),\n",
    "                                  (res_input_mask, tf.TensorShape(\n",
    "                                      [None, None])),\n",
    "                                  (modal_type_ids,\n",
    "                                   tf.TensorShape([None, None]))\n",
    "                                  ])\n",
    "\n",
    "            input_ids = features_dict['{}_input_ids'.format(modal_name)]\n",
    "            input_mask = features_dict['{}_mask'.format(modal_name)]\n",
    "            segment_ids = features_dict['{}_segment_ids'.format(modal_name)]\n",
    "\n",
    "            # broadcast the learned separator over the batch dimension\n",
    "            sep_embedding = tf.tile(self.sep_embedding, [\n",
    "                                    tf.shape(input_ids)[0], 1, 1])\n",
    "\n",
    "            if modal_type == 'text':\n",
    "                input_shape = get_shape_list(input_ids)\n",
    "                batch_size = input_shape[0]\n",
    "                seq_length = input_shape[1]\n",
    "                if input_mask is None:\n",
    "                    input_mask = tf.ones(\n",
    "                        shape=[batch_size, seq_length], dtype=tf.int32)\n",
    "\n",
    "                if segment_ids is None:\n",
    "                    segment_ids = tf.zeros(\n",
    "                        shape=[batch_size, seq_length], dtype=tf.int32)\n",
    "\n",
    "                modal_input = self.embedding_layer(input_ids)\n",
    "\n",
    "            elif modal_type == 'array':\n",
    "\n",
    "                if not self.enable_modal_type:\n",
    "                    logger.warning('Seems there\\'s a multimodal inputs but params.enable_modal_type is '\n",
    "                                   'not set to be True.')\n",
    "\n",
    "                # convert other modal embeddings to hidden_size\n",
    "                # [batch_size, seq_length, modal_dim] -> [batch_size, seq_length, hidden_size]\n",
    "                modal_input = self.multimodal_dense[modal_name](\n",
    "                    input_ids)\n",
    "            elif modal_type == 'category':\n",
    "                modal_input = self.cate_embedding[modal_name](input_ids)\n",
    "            else:\n",
    "                # previously an unknown modal type fell through and raised a\n",
    "                # confusing NameError on modal_input; fail fast instead\n",
    "                raise ValueError(\n",
    "                    'Unsupported modal type: {}'.format(modal_type))\n",
    "\n",
    "            # add sep embedding\n",
    "            modal_input = tf.concat(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n",
    "                [modal_input, sep_embedding], axis=1)\n",
    "            # add same type id to left and right\n",
    "            modal_segment_ids = tf.concat(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n",
    "                [segment_ids,\n",
    "                 tf.expand_dims(segment_ids[:, 0], axis=1)], axis=1)\n",
    "            # add mask\n",
    "            modal_mask = tf.concat(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n",
    "                [input_mask,\n",
    "                    tf.expand_dims(input_mask[:, 0], axis=1)], axis=1)\n",
    "            this_modal_type_ids = tf.ones_like(\n",
    "                modal_segment_ids) * self.modal_type_id[modal_name]\n",
    "\n",
    "            if modal_idx == 0:\n",
    "                res_modal_input = modal_input\n",
    "                res_segment_ids = modal_segment_ids\n",
    "                res_input_mask = modal_mask\n",
    "                modal_type_ids = this_modal_type_ids\n",
    "            else:\n",
    "                # concat correspondingly\n",
    "                res_modal_input = tf.concat(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n",
    "                    [res_modal_input, modal_input], axis=1)\n",
    "                res_segment_ids = tf.concat(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n",
    "                    [res_segment_ids, modal_segment_ids], axis=1)\n",
    "                res_input_mask = tf.concat(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n",
    "                    [res_input_mask, modal_mask], axis=1)\n",
    "                if self.enable_modal_type:\n",
    "                    modal_type_ids = tf.concat(\n",
    "                        [modal_type_ids, this_modal_type_ids], axis=1)\n",
    "\n",
    "        word_embedding = res_modal_input\n",
    "        if self.enable_modal_type:\n",
    "            word_embedding = word_embedding + \\\n",
    "                self.modal_type_embedding(modal_type_ids)\n",
    "\n",
    "        # apply dropout\n",
    "        word_embedding = self.dropout(word_embedding, training=training)\n",
    "        EmbeddingHidden = namedtuple(\n",
    "            'EmbeddingHidden', ['word_embedding', 'res_input_mask', 'res_segment_ids'])\n",
    "        hidden_feature = EmbeddingHidden(\n",
    "            word_embedding=word_embedding, res_input_mask=res_input_mask, res_segment_ids=res_segment_ids)\n",
    "\n",
    "        return inputs, hidden_feature\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2021-06-15 17:22:35.281 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem weibo_fake_ner, problem type: seq_tag\n",
      "2021-06-15 17:22:35.282 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem weibo_fake_multi_cls, problem type: multi_cls\n",
      "2021-06-15 17:22:35.282 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem weibo_fake_cls, problem type: cls\n",
      "2021-06-15 17:22:35.283 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem weibo_masklm, problem type: masklm\n",
      "2021-06-15 17:22:35.283 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem weibo_fake_regression, problem type: regression\n",
      "2021-06-15 17:22:35.284 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem weibo_fake_vector_fit, problem type: vector_fit\n",
      "2021-06-15 17:22:35.284 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem weibo_premask_mlm, problem type: premask_mlm\n",
      "2021-06-15 17:22:35.284 | INFO     | m3tl.base_params:register_multiple_problems:538 - Adding new problem fake_contrastive_learning, problem type: contrastive_learning\n",
      "2021-06-15 17:22:35.285 | WARNING  | m3tl.base_params:assign_problem:634 - base_dir and dir_name arguments will be deprecated in the future. Please use model_dir instead.\n",
      "2021-06-15 17:22:35.286 | WARNING  | m3tl.base_params:prepare_dir:361 - bert_config not exists. will load model from huggingface checkpoint.\n",
      "2021-06-15 17:22:40.985 | INFO     | m3tl.utils:set_phase:478 - Setting phase to train\n",
      "2021-06-15 17:22:41.060 | WARNING  | m3tl.read_write_tfrecord:chain_processed_data:248 - Chaining problems with & may consume a lot of memory if data is not pyspark RDD.\n",
      "2021-06-15 17:22:41.077 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_fake_cls_weibo_fake_ner_weibo_fake_regression_weibo_fake_vector_fit/train_00000.tfrecord\n",
      "2021-06-15 17:22:41.141 | WARNING  | m3tl.read_write_tfrecord:chain_processed_data:248 - Chaining problems with & may consume a lot of memory if data is not pyspark RDD.\n",
      "2021-06-15 17:22:41.157 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_fake_cls_weibo_fake_ner_weibo_fake_regression_weibo_fake_vector_fit/eval_00000.tfrecord\n",
      "2021-06-15 17:22:41.186 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_fake_multi_cls/train_00000.tfrecord\n",
      "2021-06-15 17:22:41.211 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_fake_multi_cls/eval_00000.tfrecord\n",
      "2021-06-15 17:22:41.291 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_masklm/train_00000.tfrecord\n",
      "2021-06-15 17:22:41.340 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_masklm/eval_00000.tfrecord\n",
      "2021-06-15 17:22:41.406 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_premask_mlm/train_00000.tfrecord\n",
      "2021-06-15 17:22:41.471 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/weibo_premask_mlm/eval_00000.tfrecord\n",
      "2021-06-15 17:22:41.490 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/fake_contrastive_learning/train_00000.tfrecord\n",
      "2021-06-15 17:22:41.509 | DEBUG    | m3tl.read_write_tfrecord:_write_fn:134 - Writing /tmp/tmp6m7cw08n/fake_contrastive_learning/eval_00000.tfrecord\n",
      "2021-06-15 17:22:42.546 | INFO     | m3tl.input_fn:train_eval_input_fn:56 - sampling weights: \n",
      "2021-06-15 17:22:42.547 | INFO     | m3tl.input_fn:train_eval_input_fn:57 - {\n",
      "    \"weibo_fake_cls_weibo_fake_ner_weibo_fake_regression_weibo_fake_vector_fit\": 0.2,\n",
      "    \"weibo_fake_multi_cls\": 0.2,\n",
      "    \"weibo_masklm\": 0.2,\n",
      "    \"weibo_premask_mlm\": 0.2,\n",
      "    \"fake_contrastive_learning\": 0.2\n",
      "}\n",
      "2021-06-15 17:22:43.345 | CRITICAL | __main__:__init__:44 - Modal Type id mapping: \n",
      " {\n",
      "    \"array\": 0,\n",
      "    \"cate\": 1,\n",
      "    \"text\": 2\n",
      "}\n",
      "2021-06-15 17:22:43.964 | INFO     | m3tl.input_fn:train_eval_input_fn:56 - sampling weights: \n",
      "2021-06-15 17:22:43.965 | INFO     | m3tl.input_fn:train_eval_input_fn:57 - {\n",
      "    \"weibo_fake_cls_weibo_fake_ner_weibo_fake_regression_weibo_fake_vector_fit\": 0.2,\n",
      "    \"weibo_fake_multi_cls\": 0.2,\n",
      "    \"weibo_masklm\": 0.2,\n",
      "    \"weibo_premask_mlm\": 0.2,\n",
      "    \"fake_contrastive_learning\": 0.2\n",
      "}\n",
      "2021-06-15 17:22:46.253 | INFO     | m3tl.input_fn:train_eval_input_fn:56 - sampling weights: \n",
      "2021-06-15 17:22:46.254 | INFO     | m3tl.input_fn:train_eval_input_fn:57 - {\n",
      "    \"weibo_fake_cls_weibo_fake_ner_weibo_fake_regression_weibo_fake_vector_fit\": 0.2,\n",
      "    \"weibo_fake_multi_cls\": 0.2,\n",
      "    \"weibo_masklm\": 0.2,\n",
      "    \"weibo_premask_mlm\": 0.2,\n",
      "    \"fake_contrastive_learning\": 0.2\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "# hide\n",
    "\n",
    "from m3tl.test_base import TestBase\n",
    "from m3tl.special_tokens import PREDICT\n",
    "tb = TestBase()\n",
    "fake_embedding_table = tb.create_fake_embedding_layer()\n",
    "# need to get one batch data to trigger modal info creation\n",
    "tb.get_one_batch_input()\n",
    "default_layer = DefaultMultimodalEmbedding(params=tb.params, embedding_layer=fake_embedding_table)\n",
    "# the layer returns (inputs, hidden_feature); element [1] should be the\n",
    "# EmbeddingHidden namedtuple, which has exactly 3 fields\n",
    "assert len(tb.test_embedding_layer(default_layer)[1]) == 3\n",
    "# same check in predict mode\n",
    "assert len(tb.test_embedding_layer(default_layer, mode=PREDICT)[1]) == 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# export\n",
    "\n",
    "\n",
    "class DuplicateAugMultimodalEmbedding(DefaultMultimodalEmbedding):\n",
    "    \"\"\"\n",
    "    This is majorly for SimCSE and also is a show case of how to\n",
    "    implement in-batch data augmentation\n",
    "    \"\"\"\n",
    "    @tf.function\n",
    "    def call(self, inputs: Dict[str, tf.Tensor], training: bool = True):\n",
    "        \"\"\"Duplicate the batch, then embed the doubled batch.\n",
    "\n",
    "        Every tensor is repeated along the batch dimension. Loss multipliers\n",
    "        of problems NOT listed in params.duplicate_data_aug_problems are set\n",
    "        to 0 for the duplicated half, so only the listed problems train on\n",
    "        the augmented copies.\n",
    "\n",
    "        Returns:\n",
    "            (dup_inputs, hidden_feature) -- note the returned features dict\n",
    "            is the duplicated one, not the original `inputs`.\n",
    "        \"\"\"\n",
    "        # simply copy every tensor and tile on batch_size\n",
    "        # dimension except for loss multiplier\n",
    "        if not self.params.duplicate_data_aug_problems:\n",
    "            logger.warning(\n",
    "                'DuplicateAugMultimodalEmbedding is specified as data augmentation strategy'\n",
    "                ' but params.duplicate_data_aug_problems not set. This augmentation will be IGNORED.')\n",
    "            return DefaultMultimodalEmbedding.call(self, inputs, training)\n",
    "\n",
    "        # get problems that needs to be marked 1\n",
    "        if isinstance(self.params.duplicate_data_aug_problems, str):\n",
    "            dup_data_aug_problems = [\n",
    "                self.params.duplicate_data_aug_problems]\n",
    "        else:\n",
    "            dup_data_aug_problems = self.params.duplicate_data_aug_problems\n",
    "\n",
    "        loss_multiplier_suffix = '_loss_multiplier'\n",
    "        dup_data_aug_loss_multiplier_name = [\n",
    "            '{}{}'.format(p, loss_multiplier_suffix) for p in dup_data_aug_problems]\n",
    "\n",
    "        dup_inputs = {}\n",
    "        for tensor_key in inputs.keys():\n",
    "            # loss multiplier of duplicate data is 0 by default\n",
    "            if tensor_key.endswith(loss_multiplier_suffix) and \\\n",
    "                    (tensor_key not in dup_data_aug_loss_multiplier_name):\n",
    "                dup_inputs[tensor_key] = tf.concat(\n",
    "                    [inputs[tensor_key], tf.zeros_like(inputs[tensor_key])], axis=0)\n",
    "            else:\n",
    "                # repeat tensor\n",
    "                dup_inputs[tensor_key] = tf.concat(\n",
    "                    [inputs[tensor_key], inputs[tensor_key]], axis=0)\n",
    "\n",
    "        # delegate to the parent's embedding logic on the duplicated inputs\n",
    "        # instead of keeping a copy-pasted version of the whole modal loop\n",
    "        # here (resolves the previous 'TODO: fix this bad approach'); the\n",
    "        # parent returns (dup_inputs, hidden_feature), exactly as before\n",
    "        return DefaultMultimodalEmbedding.call(self, dup_inputs, training)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2021-06-15 17:30:15.130 | CRITICAL | __main__:__init__:44 - Modal Type id mapping: \n",
      " {\n",
      "    \"array\": 0,\n",
      "    \"cate\": 1,\n",
      "    \"text\": 2\n",
      "}\n",
      "2021-06-15 17:30:15.964 | INFO     | m3tl.input_fn:train_eval_input_fn:56 - sampling weights: \n",
      "2021-06-15 17:30:15.964 | INFO     | m3tl.input_fn:train_eval_input_fn:57 - {\n",
      "    \"weibo_fake_cls_weibo_fake_ner_weibo_fake_regression_weibo_fake_vector_fit\": 0.2,\n",
      "    \"weibo_fake_multi_cls\": 0.2,\n",
      "    \"weibo_masklm\": 0.2,\n",
      "    \"weibo_premask_mlm\": 0.2,\n",
      "    \"fake_contrastive_learning\": 0.2\n",
      "}\n",
      "2021-06-15 17:30:18.515 | INFO     | m3tl.input_fn:train_eval_input_fn:56 - sampling weights: \n",
      "2021-06-15 17:30:18.515 | INFO     | m3tl.input_fn:train_eval_input_fn:57 - {\n",
      "    \"weibo_fake_cls_weibo_fake_ner_weibo_fake_regression_weibo_fake_vector_fit\": 0.2,\n",
      "    \"weibo_fake_multi_cls\": 0.2,\n",
      "    \"weibo_masklm\": 0.2,\n",
      "    \"weibo_premask_mlm\": 0.2,\n",
      "    \"fake_contrastive_learning\": 0.2\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "# hide\n",
    "\n",
    "from m3tl.test_base import TestBase\n",
    "from m3tl.special_tokens import PREDICT\n",
    "\n",
    "tb.params.duplicate_data_aug_problems = ['weibo_fake_ner', 'weibo_fake_multi_cls']\n",
    "default_layer = DuplicateAugMultimodalEmbedding(params=tb.params, embedding_layer=fake_embedding_table)\n",
    "features, dup_emb_output = tb.test_embedding_layer(default_layer)\n",
    "_ = tb.test_embedding_layer(default_layer, mode=PREDICT)\n",
    "\n",
    "# make sure it's dup: batch dimension should be doubled\n",
    "# (assumes the test batch size is 32 -- TODO confirm against TestBase)\n",
    "assert dup_emb_output[0].shape[0] == 64\n",
    "\n",
    "# make sure loss multiplier is correct:\n",
    "# duplicated problems keep their loss multiplier on the copied half...\n",
    "assert tf.reduce_all(features['weibo_fake_ner_loss_multiplier'][:32] == features['weibo_fake_ner_loss_multiplier'][32:]).numpy()\n",
    "# ...while non-duplicated problems get a zero loss multiplier on the copy\n",
    "zero_loss_multiplier = features['weibo_fake_regression_loss_multiplier'][32:]\n",
    "assert tf.reduce_all(zero_loss_multiplier == tf.zeros_like(zero_loss_multiplier))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.10 64-bit ('base': conda)",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
