{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "respiratory-commerce",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "wired-valuable",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): wildcard import -- later cells rely on names it brings in\n",
    "# (get_seq_entity, preprocess_train, presumably tqdm); prefer explicit imports\n",
    "from data_process import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "global-charity",
   "metadata": {},
   "outputs": [],
   "source": [
    "seq, labels = get_seq_entity(open(\"data/train.conll\").readlines())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "hidden-heart",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Only AutoTokenizer is used in this notebook; avoid `import *`\n",
    "from transformers import AutoTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "czech-individual",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "imported-bandwidth",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "fifteen-freeware",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[tokenizing]: 8855it [00:00, 48134.23it/s]\n",
      "[building labels]: 8855it [00:00, 148803.31it/s]\n"
     ]
    }
   ],
   "source": [
    "input_ids, attention_mask, token_type_ids, labels,_ = preprocess_train(\n",
    "        [\"data/train.conll\"],\n",
    "        tokenizer,\n",
    "        128,\n",
    "        False,\n",
    "        include_fake_label=False,\n",
    "        is_augment=False,\n",
    "        augment_times=1,\n",
    "        regex_rate=1,\n",
    "        sample_weight_rate=None\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "mounted-manchester",
   "metadata": {},
   "outputs": [],
   "source": [
    "labels = np.argmax(labels,axis=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "unlike-bunch",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "level-resolution",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ True,  True,  True, ...,  True,  True,  True],\n",
       "       [ True,  True,  True, ...,  True,  True,  True],\n",
       "       [ True,  True,  True, ...,  True,  True,  True],\n",
       "       ...,\n",
       "       [ True,  True,  True, ...,  True,  True,  True],\n",
       "       [ True,  True,  True, ...,  True,  True,  True],\n",
       "       [ True,  True,  True, ...,  True,  True,  True]])"
      ]
     },
     "execution_count": 60,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "labels[10] ==labels[10].reshape(128*57).reshape((128,57))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "swedish-geography",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(8855, 128)"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "labels.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "composite-assistant",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "professional-addition",
   "metadata": {},
   "outputs": [],
   "source": [
    "input_ids = tf.convert_to_tensor(input_ids) #  train_input_ids[0] is a list of input ids and train_input_ids is a list of lists\n",
    "attention_mask = tf.convert_to_tensor(attention_mask) #  as above\n",
    "token_type_ids = tf.convert_to_tensor(token_type_ids) #  as above\n",
    "labels = tf.convert_to_tensor(labels) #  as above\n",
    "train_dataset = tf.data.Dataset.from_tensor_slices((train_inputs,  labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "unexpected-statement",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): tf.data.experimental.TFRecordWriter is deprecated in favour\n",
    "# of tf.io.TFRecordWriter; this `writer` is never used in later cells\n",
    "writer = tf.data.experimental.TFRecordWriter(\"demo.record\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "split-cruise",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<TensorSliceDataset shapes: ({input_ids: (128,), attention_mask: (128,), token_type_ids: (128,)}, (128, 57)), types: ({input_ids: tf.float64, attention_mask: tf.float64, token_type_ids: tf.float64}, tf.float64)>"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "suburban-efficiency",
   "metadata": {},
   "outputs": [],
   "source": [
    "def separate_x_y(entry, x_keys, y_keys):\n",
    "    \"\"\"Split one dataset entry (a dict of features) into an (X, Y) tuple.\n",
    "\n",
    "    Args:\n",
    "        entry: dict - a single element of the tf dataset\n",
    "        x_keys: list of key names routed to the model inputs X\n",
    "        y_keys: list of key names routed to the targets Y\n",
    "    Returns:\n",
    "        (X, Y) tuple of dicts; keys in neither list are dropped\n",
    "    \"\"\"\n",
    "    X = {}\n",
    "    Y = {}\n",
    "    # parameter renamed from `dict`, which shadowed the builtin type\n",
    "    for k, v in entry.items():\n",
    "        if k in x_keys:\n",
    "            X[k] = v\n",
    "            continue\n",
    "        if k in y_keys:\n",
    "            Y[k] = v\n",
    "    return (X, Y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "dramatic-freight",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "import collections\n",
    "import json\n",
    "import os\n",
    "import random\n",
    "\n",
    "import six\n",
    "import tensorflow as tf\n",
    "from absl import logging\n",
    "\n",
    "logging.set_verbosity(\"INFO\")\n",
    "\n",
    "\n",
    "def _bytes_feature(value):\n",
    "    \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n",
    "    if isinstance(value, type(tf.constant(0))):\n",
    "        # BytesList won't unpack a string from an EagerTensor.\n",
    "        value = value.numpy()\n",
    "    if isinstance(value, list):\n",
    "        # lists are encoded element-wise as binary strings\n",
    "        value = [six.ensure_binary(token) for token in value]\n",
    "        return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n",
    "        # value = str([six.ensure_text(token, \"utf-8\") for \\\n",
    "        # token in value]).encode()\n",
    "    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n",
    "\n",
    "\n",
    "def _float_feature(value):\n",
    "    \"\"\"Returns a float_list from a float / double.\"\"\"\n",
    "    if isinstance(value, list):\n",
    "        return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n",
    "    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n",
    "\n",
    "\n",
    "def _int_feature(values):\n",
    "    \"\"\"Returns an int64_list from an int or an iterable of ints.\"\"\"\n",
    "    if isinstance(values, int):\n",
    "        values = [values]\n",
    "    feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n",
    "    return feature\n",
    "\n",
    "\n",
    "TF_SCHEMA = {\"var_len\": tf.io.VarLenFeature, \"fixed_len\": tf.io.FixedLenFeature}\n",
    "\n",
    "TF_VALUE = {\"bytes\": tf.string, \"int\": tf.int64, \"float\": tf.float32}\n",
    "\n",
    "TF_FUNC = {\"bytes\": _bytes_feature, \"int\": _int_feature, \"float\": _float_feature}\n",
    "\n",
    "\n",
    "class TFWriter(object):\n",
    "    \"\"\"TFWriter class . This class is responsible\n",
    "    to write tfrecords, based on given schema and data.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        schema,\n",
    "        file_name=\"file\",\n",
    "        model_dir=None,\n",
    "        tag=\"dev\",\n",
    "        n_files=10,\n",
    "        shuffle=True,\n",
    "        max_files_per_record=10000,\n",
    "        overwrite=False,\n",
    "        verbose_counter=1000,\n",
    "    ):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            schema: dict - (this is where schema of the tfrecords specified)\n",
    "            file_name: str - file name\n",
    "            model_dir: str - TFRecords will write to this\n",
    "                             model dir . If not given, use the default directory\n",
    "            tag: str - 'train' or 'dev'\n",
    "            n_files: int - If `tag` == 'train': file will be\n",
    "                     split into `n_files` for randomness\n",
    "            shuffle: bool\n",
    "            max_files_per_record: No of individual files\n",
    "                    (can be a sentence/ tokenized sentence) per record\n",
    "            overwrite: bool - If True, we will overwrite\n",
    "                    tfrecords of the same name\n",
    "        Raises:\n",
    "            Error if the model_dir / the file exists .\n",
    "            You can pass overwrite = True to disable this behaviour\n",
    "        \"\"\"\n",
    "\n",
    "        self.shuffle = shuffle\n",
    "        self.max_files_per_record = max_files_per_record\n",
    "        # Schema Check\n",
    "        self.is_schema_valid(schema)\n",
    "        self.tag = tag\n",
    "\n",
    "        if tag not in [\"train\", \"dev\"]:\n",
    "            logging.info(\"Unknown tag {} found\".format(tag))\n",
    "            raise Exception(\"Unknown Tag\")\n",
    "\n",
    "        def is_check(all_files):\n",
    "            # refuse to clobber existing tfrecord files unless overwrite=True\n",
    "            for file_ in all_files:\n",
    "                if os.path.exists(file_):\n",
    "                    logging.info(\n",
    "                        \"File exists, overwrite is not recommended. \\\n",
    "                        If you want to overwrite, pass `overwrite`=True\"\n",
    "                    )\n",
    "                    raise FileExistsError(file_)\n",
    "\n",
    "        # we need this file to write the schema to the model_dir\n",
    "        schema_file_name = \"schema.json\"\n",
    "        if model_dir:\n",
    "            if overwrite is False:\n",
    "                if os.path.exists(model_dir):\n",
    "                    logging.info(\"Model directory {} exists\".format(model_dir))\n",
    "                    raise FileExistsError(model_dir)\n",
    "            os.makedirs(model_dir, exist_ok=True)\n",
    "            self.file_name = file_name = os.path.join(model_dir, file_name.replace(\".tfrecord\", \"\"))\n",
    "            schema_file_name = os.path.join(model_dir, schema_file_name)\n",
    "        else:\n",
    "            # Bug fix: the train/non-shuffle branch below reads self.file_name,\n",
    "            # which was previously assigned only when model_dir was given\n",
    "            # (AttributeError otherwise).\n",
    "            self.file_name = file_name\n",
    "\n",
    "        if tag == \"train\":\n",
    "            if self.shuffle:\n",
    "                # pre-open n_files shards; records are scattered across them\n",
    "                self.all_files = [\"{}_{}_{}_{}.tfrecord\".format(file_name, tag, i, n_files) for i in range(n_files)]\n",
    "                self.examples_per_record = {file_: 0 for file_ in self.all_files}\n",
    "                if overwrite is False:\n",
    "                    is_check(self.all_files)\n",
    "                self.all_writer = [tf.io.TFRecordWriter(file_) for file_ in self.all_files]\n",
    "            else:\n",
    "                # sequential mode: one shard at a time, rolled over in write_record\n",
    "                self.current_writer = 0\n",
    "                self.temp_writers = []\n",
    "                self.examples_per_record = {}\n",
    "                self.current_file_name = \"{}_{}_{}.tfrecord\".format(self.file_name, self.tag, self.current_writer)\n",
    "                self.examples_per_record[self.current_file_name] = 0\n",
    "                self.current_file = tf.io.TFRecordWriter(self.current_file_name)\n",
    "                self.temp_writers.append(self.current_file)\n",
    "\n",
    "        else:\n",
    "            n_files = 1\n",
    "            # NOTE(review): dev filenames interleave index/tag in a different\n",
    "            # order than train ({name}_{i}_{tag}_{n}); kept as-is for\n",
    "            # compatibility with existing files -- confirm intent\n",
    "            self.all_files = [\"{}_{}_{}_{}.tfrecord\".format(file_name, i, tag, n_files) for i in range(n_files)]\n",
    "            if overwrite is False:\n",
    "                is_check(self.all_files)\n",
    "            self.all_writer = [tf.io.TFRecordWriter(file_) for file_ in self.all_files]\n",
    "\n",
    "        self.schema = schema\n",
    "        self.schema_writer_fn = self.generate_schema_from_dict(schema)\n",
    "\n",
    "        self.verbose_counter = verbose_counter\n",
    "        self.global_counter = 0\n",
    "\n",
    "        # Save schema for further reading\n",
    "        with open(schema_file_name, \"w\") as f:\n",
    "            json.dump(schema, f, indent=2)\n",
    "\n",
    "    def is_schema_valid(self, schema):\n",
    "        \"\"\"\n",
    "        simple schema validation check\n",
    "        \"\"\"\n",
    "        for k, v in schema.items():\n",
    "            if v[0] == \"var_len\":\n",
    "                assert len(v) == 2\n",
    "                assert v[1] in TF_VALUE\n",
    "\n",
    "            if v[0] == \"fixed_len\":\n",
    "                assert len(v) == 3\n",
    "                assert v[1] in TF_VALUE\n",
    "                assert isinstance(v[2], list)\n",
    "\n",
    "    def close_sess(self):\n",
    "        if self.shuffle:\n",
    "            for file_writer in self.all_writer:\n",
    "                file_writer.close()\n",
    "            logging.info(\"All writer objects closed\")\n",
    "        else:\n",
    "            for file_writer in self.temp_writers:\n",
    "                file_writer.close()\n",
    "            logging.info(\"All writer objects closed\")\n",
    "\n",
    "    def generate_schema_from_dict(self, schema_dict):\n",
    "        \"\"\"\n",
    "        schema_dict: a dict\n",
    "        \"\"\"\n",
    "        allowed_schema_types = [\"var_len\", \"fixed_len\"]\n",
    "        allowed_schema_values = [\"bytes\", \"int\", \"float\"]\n",
    "\n",
    "        def check_schema(schema_dict):\n",
    "            for _, value in schema_dict.items():\n",
    "                schema_key = value[0]\n",
    "                schema_value = value[1]\n",
    "                if schema_key not in allowed_schema_types:\n",
    "                    error_message = \"{} not in {}\".format(schema_key, allowed_schema_types)\n",
    "                    raise ValueError(error_message)\n",
    "                if schema_value not in allowed_schema_values:\n",
    "                    error_message = \"{} not in {}\".format(schema_value, allowed_schema_values)\n",
    "                    raise ValueError(error_message)\n",
    "\n",
    "        check_schema(schema_dict)\n",
    "\n",
    "        schema_writer_dict = {}\n",
    "        for key, value in schema_dict.items():\n",
    "            schema_writer_dict[key] = TF_FUNC[value[1]]  # _bytes_feature\n",
    "        return schema_writer_dict\n",
    "\n",
    "    def process(self, parse_fn):\n",
    "        \"\"\"Iterate over `parse_fn` and write each yielded example to TFRecord.\n",
    "\n",
    "        Args:\n",
    "            parse_fn: a generator/iterator yielding feature dicts; materialized\n",
    "                sequences (anything with __len__, e.g. lists) are rejected\n",
    "        \"\"\"\n",
    "        if hasattr(parse_fn, \"__iter__\") and not hasattr(parse_fn, \"__len__\"):\n",
    "            for entry in parse_fn:\n",
    "                self.write_record(entry)\n",
    "            logging.info(\"Total individual observations/examples written is {}\".format(self.global_counter))\n",
    "            self.close_sess()\n",
    "        else:\n",
    "            raise ValueError(\"Expected `parse_fn` to be a generator/iterator \")\n",
    "\n",
    "    def write_record(self, example):\n",
    "        \"\"\"Serialize one example dict and write it to a TFRecord shard.\n",
    "\n",
    "        Args:\n",
    "            example: dict - feature name -> value to write (renamed from\n",
    "                `input`, which shadowed the builtin)\n",
    "        Raises:\n",
    "            ValueError: if a fixed_len feature's length disagrees with the\n",
    "                first dimension declared in the schema\n",
    "        \"\"\"\n",
    "        features = collections.OrderedDict()\n",
    "        for key, value in example.items():\n",
    "            if self.schema[key][0] == \"fixed_len\":\n",
    "                if self.schema[key][2] != []:\n",
    "                    # only the first dim of the declared shape is validated\n",
    "                    shape = self.schema[key][2][0]\n",
    "                    if len(value) != shape:\n",
    "                        raise ValueError(\n",
    "                            \"`{}` has schema shape `{}`, but provided \\\n",
    "                              values `{}` has shape `{}`\".format(\n",
    "                                key, shape, value, len(value)  # noqa\n",
    "                            )\n",
    "                        )\n",
    "\n",
    "            if isinstance(value, six.text_type):\n",
    "                value = six.ensure_binary(value, \"utf-8\")\n",
    "            features[key] = self.schema_writer_fn[key](value)\n",
    "        example_proto = tf.train.Example(features=tf.train.Features(feature=features))\n",
    "\n",
    "        if self.tag == \"train\":\n",
    "            if self.shuffle:\n",
    "                # scatter writes across shards for shuffle-at-write-time\n",
    "                index = random.choice(range(len(self.all_writer)))\n",
    "                the_writer = self.all_writer[index]\n",
    "                the_writer.write(example_proto.SerializeToString())\n",
    "                self.examples_per_record[self.all_files[index]] += 1\n",
    "                self.global_counter += 1\n",
    "            else:\n",
    "                # If global counter (no of individual records processed)\n",
    "                # exceeds max_files_per_record then roll over to a new shard\n",
    "                if self.global_counter > (self.current_writer + 1) * self.max_files_per_record:\n",
    "                    self.current_writer += 1\n",
    "                    self.current_file_name = \"{}_{}_{}.tfrecord\".format(self.file_name, self.tag, self.current_writer)\n",
    "                    self.examples_per_record[self.current_file_name] = 0\n",
    "                    self.current_file = tf.io.TFRecordWriter(self.current_file_name)\n",
    "                    self.temp_writers.append(self.current_file)\n",
    "\n",
    "                the_writer = self.current_file\n",
    "                the_writer.write(example_proto.SerializeToString())\n",
    "                self.examples_per_record[self.current_file_name] += 1\n",
    "                self.global_counter += 1\n",
    "\n",
    "            if self.global_counter % self.verbose_counter == 0:\n",
    "                logging.info(\"Wrote {} tfrecords\".format(self.global_counter))\n",
    "        else:\n",
    "            the_writer = self.all_writer[0]\n",
    "            the_writer.write(example_proto.SerializeToString())\n",
    "            self.global_counter += 1\n",
    "\n",
    "            if self.global_counter % self.verbose_counter == 0:\n",
    "                logging.info(\"Wrote {} tfrecords\".format(self.global_counter))\n",
    "\n",
    "\n",
    "class TFReader(object):\n",
    "    \"\"\"\n",
    "    TFReader class . This class is responsible\n",
    "    to read tfrecords, based on given schema.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, schema, tfrecord_files, shuffle_files=False, keys=None):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            schema: dict - tfrecord schema (same layout as used by TFWriter)\n",
    "            tfrecord_files: list/tuple of tfrecord file paths\n",
    "            shuffle_files: bool - shuffle file order when listing\n",
    "            keys: optional list of feature names to read; defaults to all\n",
    "                schema keys. (`None` replaces the old mutable `[]` default;\n",
    "                passing `[]` still behaves the same.)\n",
    "        \"\"\"\n",
    "        if not isinstance(tfrecord_files, (list, tuple)):\n",
    "            raise Exception(\"input must be a list or tuple of files\")\n",
    "        self.schema = schema\n",
    "        self.tfrecord_files = tfrecord_files\n",
    "        self.shuffle_files = shuffle_files\n",
    "        self.keys = keys if keys is not None else []\n",
    "        if self.keys == []:\n",
    "            self.keys = self.schema.keys()\n",
    "        self.schema_reader_fn, self.schema_writer_fn = self.generate_schema_from_dict(schema)\n",
    "\n",
    "    def generate_schema_from_dict(self, schema_dict):\n",
    "        \"\"\"\n",
    "        schema_dict: a dict\n",
    "        \"\"\"\n",
    "        allowed_schema_types = [\"var_len\", \"fixed_len\"]\n",
    "        allowed_schema_values = [\"bytes\", \"int\", \"float\"]\n",
    "\n",
    "        def check_schema(schema_dict):\n",
    "            for _, value in schema_dict.items():\n",
    "                schema_key = value[0]\n",
    "                schema_value = value[1]\n",
    "                if schema_key not in allowed_schema_types:\n",
    "                    error_message = \"{} not in {}\".format(schema_key, allowed_schema_types)\n",
    "                    raise ValueError(error_message)\n",
    "                if schema_value not in allowed_schema_values:\n",
    "                    error_message = \"{} not in {}\".format(schema_value, allowed_schema_values)\n",
    "                    raise ValueError(error_message)\n",
    "\n",
    "        check_schema(schema_dict)\n",
    "\n",
    "        # Schema reader function is here\n",
    "\n",
    "        schema_reader_dict = {}\n",
    "        for key, value in schema_dict.items():\n",
    "            if self.keys and key not in self.keys:\n",
    "                continue\n",
    "\n",
    "            if value[0] == \"var_len\":\n",
    "                schema_reader_dict[key] = tf.io.VarLenFeature(TF_VALUE[value[1]])\n",
    "            if value[0] == \"fixed_len\":\n",
    "                # Fixed len should have shape mentioned in the schema\n",
    "                shape = value[2]\n",
    "                schema_reader_dict[key] = tf.io.FixedLenFeature(\n",
    "                    shape=shape, dtype=TF_VALUE[value[1]], default_value=None\n",
    "                )\n",
    "\n",
    "        schema_writer_dict = {}\n",
    "        for key, value in schema_dict.items():\n",
    "            schema_writer_dict[key] = TF_FUNC[value[1]]  # _bytes_feature\n",
    "        return schema_reader_dict, schema_writer_dict\n",
    "\n",
    "    def decode_record_var(self, record, keys=[]):\n",
    "        \"\"\"Decodes a record to a TensorFlow example.\n",
    "\n",
    "        NOTE(review): the `keys` argument is accepted but never used here;\n",
    "        feature filtering is driven by `self.keys` -- confirm intent.\n",
    "        \"\"\"\n",
    "        feature_dict = tf.io.parse_single_example(record, self.schema_reader_fn)\n",
    "\n",
    "        parse_dict = feature_dict.copy()\n",
    "        for k in self.keys:\n",
    "            v = feature_dict[k]\n",
    "            # downcast int64 features to int32; NOTE(review): the cast result\n",
    "            # is only kept for var_len features below -- fixed_len features\n",
    "            # stay int64 in parse_dict. Confirm whether that is intended.\n",
    "            if v.dtype == tf.int64:\n",
    "                v = tf.cast(v, tf.int32)\n",
    "            # var_len features parse as SparseTensor; densify them\n",
    "            if self.schema[k][0] == \"var_len\":\n",
    "                parse_dict[k] = tf.sparse.to_dense(v)\n",
    "\n",
    "        return parse_dict\n",
    "\n",
    "    def auto_batch(\n",
    "        self,\n",
    "        tf_dataset,\n",
    "        batch_size,\n",
    "        padded_values=None,\n",
    "        padded_shapes=None,\n",
    "        x_keys=None,\n",
    "        y_keys=None,\n",
    "        shuffle=False,\n",
    "        drop_remainder=False,\n",
    "        shuffle_buffer_size=10000,\n",
    "        prefetch_buffer_size=100,\n",
    "    ):\n",
    "        \"\"\"Auto Batching\n",
    "        Args:\n",
    "            tf_dataset : TF dataset\n",
    "            x_keys (optional): List of key names. We will filter based on this.\n",
    "            y_keys (optional): List of key names.\n",
    "            shuffle (bool, optional): [description]. Defaults to False.\n",
    "            shuffle_buffer_size (int, optional): [description]. Defaults to 10000.\n",
    "        Returns:\n",
    "            batched tf dataset\n",
    "        \"\"\"\n",
    "        element_spec = tf_dataset.element_spec\n",
    "        _padded_values = {}\n",
    "        if not padded_values:\n",
    "            padded_values = {}\n",
    "        # sometimes we might have to have sme custom values other than 0\n",
    "        for k, v in element_spec.items():\n",
    "            if k in padded_values:\n",
    "                value = padded_values[k]\n",
    "                _padded_values[k] = tf.constant(value, dtype=value.dtype)\n",
    "            else:\n",
    "                _padded_values[k] = tf.constant(0, dtype=v.dtype)\n",
    "        dataset = tf_dataset.padded_batch(\n",
    "            padding_values=_padded_values,\n",
    "            padded_shapes=padded_shapes,\n",
    "            batch_size=batch_size,\n",
    "            drop_remainder=drop_remainder,\n",
    "        )\n",
    "        # fmt: off\n",
    "        if x_keys and y_keys:\n",
    "            dataset = dataset.map(lambda x: separate_x_y(x, x_keys, y_keys), num_parallel_calls=tf.data.experimental.AUTOTUNE)  # noqa\n",
    "        # fmt: on\n",
    "        if shuffle:\n",
    "            dataset = dataset.shuffle(shuffle_buffer_size, seed=None, reshuffle_each_iteration=True)\n",
    "        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n",
    "        return dataset\n",
    "\n",
    "    def read_record(self, keys=[], auto_batch=False, **kwargs):\n",
    "        \"\"\"Read TF records\n",
    "        Args:\n",
    "            keys (list, optional): List of keys to read from the records\n",
    "            auto_batch (bool, optional): Whethe to auto batch data\n",
    "        Returns:\n",
    "            [type]: [description]\n",
    "        \"\"\"\n",
    "        dataset = tf.data.Dataset.list_files(self.tfrecord_files, shuffle=self.shuffle_files)\n",
    "        dataset = dataset.interleave(\n",
    "            tf.data.TFRecordDataset,\n",
    "            cycle_length=8,\n",
    "            num_parallel_calls=tf.data.experimental.AUTOTUNE,\n",
    "        )\n",
    "\n",
    "        def decode_fn(record):\n",
    "            return self.decode_record_var(record, keys)\n",
    "\n",
    "        dataset = dataset.map(decode_fn)\n",
    "        # Using `ignore_errors()` will drop the element that causes an error.\n",
    "        dataset = dataset.apply(tf.data.experimental.ignore_errors())  # ==> {1., 0.5, 0.2}\n",
    "        if auto_batch:\n",
    "            dataset = self.auto_batch(dataset, **kwargs)\n",
    "        return dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "liable-thanks",
   "metadata": {},
   "outputs": [],
   "source": [
    "tf_dummy_schema = {\n",
    "\n",
    "    \"input_ids\": (\"var_len\", \"int\"),\n",
    "    \"attention_mask\": (\"var_len\", \"int\"),\n",
    "\n",
    "    \"token_type_ids\": (\"var_len\", \"int\"),\n",
    "\n",
    "    \"labels\": (\"var_len\", \"float\")\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "mechanical-audio",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_inputs = {\n",
    "    'input_ids': input_ids,\n",
    "    'attention_mask': attention_mask, \n",
    "    'token_type_ids': token_type_ids,\n",
    "    'labels':labels\n",
    "\n",
    "}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "id": "settled-inspiration",
   "metadata": {},
   "outputs": [],
   "source": [
    "tf_writer = TFWriter(tf_dummy_schema, \"train\", tag=\"train\", overwrite=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "capable-charm",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "moved-flood",
   "metadata": {},
   "outputs": [],
   "source": [
    "np.shape()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "oriented-coverage",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(128, 57)"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "labels[0].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "fossil-rates",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|█         | 965/8855 [00:00<00:05, 1377.14it/s]INFO:absl:Wrote 1000 tfrecods\n",
      " 22%|██▏       | 1937/8855 [00:01<00:04, 1384.06it/s]INFO:absl:Wrote 2000 tfrecods\n",
      " 33%|███▎      | 2908/8855 [00:02<00:04, 1381.43it/s]INFO:absl:Wrote 3000 tfrecods\n",
      " 44%|████▍     | 3887/8855 [00:02<00:03, 1392.07it/s]INFO:absl:Wrote 4000 tfrecods\n",
      " 55%|█████▍    | 4867/8855 [00:03<00:02, 1392.45it/s]INFO:absl:Wrote 5000 tfrecods\n",
      " 68%|██████▊   | 5980/8855 [00:04<00:02, 1344.72it/s]INFO:absl:Wrote 6000 tfrecods\n",
      " 79%|███████▊  | 6952/8855 [00:05<00:01, 1363.49it/s]INFO:absl:Wrote 7000 tfrecods\n",
      " 90%|████████▉ | 7929/8855 [00:05<00:00, 1392.87it/s]INFO:absl:Wrote 8000 tfrecods\n",
      "100%|██████████| 8855/8855 [00:06<00:00, 1380.73it/s]\n"
     ]
    }
   ],
   "source": [
    "# Write one example per tokenized sequence to the sharded train TFRecords.\n",
    "for i in tqdm(range(len(input_ids))):\n",
    "    \n",
    "    train_inputs = {\n",
    "        'input_ids': input_ids[i].astype(int),\n",
    "        'attention_mask': attention_mask[i].astype(int), \n",
    "        'token_type_ids': token_type_ids[i].astype(int),\n",
    "        # labels are flattened to 128*57 floats here and reshaped back to\n",
    "        # (128, 57) by label_encode when the records are read\n",
    "        'labels':labels[i].reshape(128*57).astype(float).tolist()\n",
    "\n",
    "    }\n",
    "    tf_writer.write_record(train_inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "general-calcium",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All files ['train.tfrecord_train_8_10.tfrecord', 'train.tfrecord_train_1_10.tfrecord', 'train.tfrecord_train_2_10.tfrecord', 'train.tfrecord_train_9_10.tfrecord', 'train.py', 'train.tfrecord_train_6_10.tfrecord', 'train.tfrecord_train_3_10.tfrecord', 'train.tfrecord_train_0_10.tfrecord', 'train.tfrecord_train_5_10.tfrecord', 'train.tfrecord_train_7_10.tfrecord', 'train.tfrecord_train_4_10.tfrecord']\n"
     ]
    }
   ],
   "source": [
    "import glob\n",
    "\n",
    "all_files = glob.glob(\"train**\")\n",
    "print(\"All files\", all_files)\n",
    "tf_reader = TFReader(tf_dummy_schema, all_files)\n",
    "dataset = tf_reader.read_record()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "id": "republican-penalty",
   "metadata": {},
   "outputs": [],
   "source": [
    "def label_encode(example, max_seq_len=128, n_classes=57):\n",
    "    \"\"\"Restore the flat `labels` feature to its (max_seq_len, n_classes) layout.\n",
    "\n",
    "    Labels were flattened to 128*57 floats before being written to the\n",
    "    TFRecord; defaults preserve the previously hard-coded shape.\n",
    "    \"\"\"\n",
    "    example[\"labels\"] = tf.reshape(example[\"labels\"], (max_seq_len, n_classes))\n",
    "    return example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "infectious-outdoors",
   "metadata": {},
   "outputs": [],
   "source": [
    "dataset = dataset.map(label_encode)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "applied-resource",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'attention_mask': <tf.Tensor: shape=(128,), dtype=int32, numpy=\n",
      "array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)>, 'input_ids': <tf.Tensor: shape=(128,), dtype=int32, numpy=\n",
      "array([ 101,  100, 1897, 1920,  100,  100,  100,  100, 1902,  100,  100,\n",
      "       1902,  100,  100, 1946, 1966, 1753, 1926, 1878,  102,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
      "          0,    0,    0,    0,    0,    0,    0], dtype=int32)>, 'labels': <tf.Tensor: shape=(128, 57), dtype=float32, numpy=\n",
      "array([[0., 0., 0., ..., 0., 0., 0.],\n",
      "       [0., 1., 0., ..., 0., 0., 0.],\n",
      "       [0., 0., 0., ..., 0., 0., 0.],\n",
      "       ...,\n",
      "       [0., 0., 0., ..., 0., 0., 0.],\n",
      "       [0., 0., 0., ..., 0., 0., 0.],\n",
      "       [0., 0., 0., ..., 0., 0., 0.]], dtype=float32)>, 'token_type_ids': <tf.Tensor: shape=(128,), dtype=int32, numpy=\n",
      "array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)>}\n"
     ]
    }
   ],
   "source": [
    "# Peek at a single mapped example to sanity-check the tensor shapes.\n",
    "for item in dataset.take(1):\n",
    "    print(item)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "informal-topic",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:absl:Wrote 1000 tfrecods\n",
      "INFO:absl:Wrote 2000 tfrecods\n",
      "INFO:absl:Wrote 3000 tfrecods\n",
      "INFO:absl:Wrote 4000 tfrecods\n",
      "INFO:absl:Wrote 5000 tfrecods\n",
      "INFO:absl:Wrote 6000 tfrecods\n",
      "INFO:absl:Wrote 7000 tfrecods\n",
      "INFO:absl:Wrote 8000 tfrecods\n",
      "INFO:absl:Wrote 9000 tfrecods\n",
      "INFO:absl:Wrote 10000 tfrecods\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All files ['test.tfrecord_train_4_10.tfrecord', 'test.tfrecord_train_6_10.tfrecord', 'test.tfrecord_train_7_10.tfrecord', 'test.tfrecord_train_2_10.tfrecord', 'test.tfrecord_train_8_10.tfrecord', 'test.tfrecord_train_0_10.tfrecord', 'test.tfrecord_train_5_10.tfrecord', 'test.tfrecord_train_9_10.tfrecord', 'test.tfrecord_train_3_10.tfrecord', 'test.tfrecord_train_1_10.tfrecord']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:absl:Wrote 1000 tfrecods\n",
      "INFO:absl:Wrote 2000 tfrecods\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All files ['test2.tfrecord_train_8_10.tfrecord', 'test2.tfrecord_train_5_10.tfrecord', 'test2.tfrecord_train_2_10.tfrecord', 'test2.tfrecord_train_0_10.tfrecord', 'test2.tfrecord_train_6_10.tfrecord', 'test2.tfrecord_train_7_10.tfrecord', 'test2.tfrecord_train_9_10.tfrecord', 'test2.tfrecord_train_3_10.tfrecord', 'test2.tfrecord_train_4_10.tfrecord', 'test2.tfrecord_train_1_10.tfrecord']\n"
     ]
    }
   ],
   "source": [
    "# Dummy round-trip demo for TFWriter / TFReader with two schemas.\n",
    "# `import glob` hoisted to the top of the cell so the cell is\n",
    "# self-contained when run top-to-bottom.\n",
    "import glob\n",
    "\n",
    "# Schema 1: every feature is variable-length.\n",
    "tf_dummy_schema = {\n",
    "    \"correct\": (\"var_len\", \"bytes\"),\n",
    "    \"bad\": (\"var_len\", \"bytes\"),\n",
    "    \"correct_tokens\": (\"var_len\", \"bytes\"),\n",
    "    \"bad_tokens\": (\"var_len\", \"bytes\"),\n",
    "    \"correct_indexes\": (\"var_len\", \"int\"),\n",
    "    \"bad_indexes\": (\"var_len\", \"int\"),\n",
    "    \"source\": (\"var_len\", \"bytes\"),\n",
    "    \"unique_id\": (\"var_len\", \"float\"),\n",
    "    \"simple_float\": (\"var_len\", \"float\"),\n",
    "}\n",
    "\n",
    "data_1 = {\n",
    "    \"correct\": b\"Hi how are you\",\n",
    "    \"bad\": b\"Hi how are your?\",\n",
    "    \"correct_tokens\": [b\"hi\", b\"how\", b\"are\", b\"you\"],\n",
    "    \"bad_tokens\": [b\"hi\", b\"how\", b\"are\", b\"your?\"],\n",
    "    \"correct_indexes\": [1, 2, 3, 4, 5],\n",
    "    \"bad_indexes\": [1, 2, 3, 4, 5],\n",
    "    \"source\": b\"dummy_source\",\n",
    "    \"unique_id\": 1230344.0,\n",
    "    \"simple_float\": [0.1, 0.2, 0.3, 0.4],\n",
    "}\n",
    "\n",
    "tf_writer = TFWriter(tf_dummy_schema, \"test.tfrecord\", tag=\"train\", overwrite=True)\n",
    "\n",
    "for i in range(10000):\n",
    "    tf_writer.write_record(data_1)\n",
    "\n",
    "# BUG FIX: the old pattern \"test**\" behaves like \"test*\" (glob's `**` is\n",
    "# only recursive with recursive=True), so on a re-run it also matched the\n",
    "# \"test2.tfrecord_*\" shards written below, feeding files with a different\n",
    "# (fixed_len) schema into this var_len reader. Anchor the pattern to this\n",
    "# writer's exact prefix instead.\n",
    "all_files = glob.glob(\"test.tfrecord_*\")\n",
    "print(\"All files\", all_files)\n",
    "tf_reader = TFReader(tf_dummy_schema, all_files)\n",
    "dataset = tf_reader.read_record()\n",
    "\n",
    "# Schema 2: mix of fixed-length and variable-length features.\n",
    "tf_dummy_schema = {\n",
    "    \"correct\": (\"var_len\", \"bytes\"),\n",
    "    \"bad\": (\"var_len\", \"bytes\"),\n",
    "    \"correct_tokens\": (\"var_len\", \"bytes\"),\n",
    "    \"bad_tokens\": (\"fixed_len\", \"bytes\", [4]),\n",
    "    \"correct_indexes\": (\"var_len\", \"int\"),\n",
    "    \"bad_indexes\": (\"var_len\", \"int\"),\n",
    "    \"source\": (\"fixed_len\", \"bytes\", []),\n",
    "    \"unique_id\": (\"fixed_len\", \"float\", []),\n",
    "    \"simple_float\": (\"fixed_len\", \"float\", [4]),\n",
    "}\n",
    "\n",
    "data_1 = {\n",
    "    \"correct\": b\"Hi how are you\",\n",
    "    \"bad\": b\"Hi how are your?\",\n",
    "    \"correct_tokens\": [b\"hi\", b\"how\", b\"are\", b\"you\"],\n",
    "    \"bad_tokens\": [b\"hi\", b\"how\", b\"your?\", b\":-)\"],\n",
    "    \"correct_indexes\": [1, 2, 3, 4, 5],\n",
    "    \"bad_indexes\": [1, 2, 3, 4, 5],\n",
    "    \"source\": b\"dummy_source\",\n",
    "    \"unique_id\": 1230344.0,\n",
    "    \"simple_float\": [0.1, 0.2, 0.3, 0.4],\n",
    "}\n",
    "\n",
    "data_2 = {\n",
    "    \"correct\": b\"Hi how are you\",\n",
    "    \"bad\": b\"Hi how are your?\",\n",
    "    \"correct_tokens\": [b\"hi\", b\"how\", b\"are\", b\"you\"],\n",
    "    \"bad_tokens\": [b\"hi\", b\"how\", b\"your?\", b\"are\"],\n",
    "    \"correct_indexes\": [1, 2, 3, 4, 5],\n",
    "    \"bad_indexes\": [1, 2, 3, 4, 5],\n",
    "    \"source\": b\"dummy_source\",\n",
    "    \"unique_id\": 1230344.0,\n",
    "    \"simple_float\": [0.1, 0.2, 0.3, 0.4],\n",
    "}\n",
    "\n",
    "tf_writer = TFWriter(tf_dummy_schema, \"test2.tfrecord\", tag=\"train\", overwrite=True)\n",
    "for i in range(1000):\n",
    "    tf_writer.write_record(data_1)\n",
    "    tf_writer.write_record(data_2)\n",
    "\n",
    "# Same prefix-anchored pattern for the second shard set.\n",
    "all_files = glob.glob(\"test2.tfrecord_*\")\n",
    "print(\"All files\", all_files)\n",
    "tf_reader = TFReader(tf_dummy_schema, all_files, keys=[])\n",
    "dataset = tf_reader.read_record()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "spectacular-scout",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b':-)'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n",
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b':-)'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n",
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b'are'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n",
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b'are'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n",
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b':-)'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n",
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b':-)'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n",
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b':-)'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n",
      "{'bad': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are your?'], dtype=object)>, 'bad_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct': <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'Hi how are you'], dtype=object)>, 'correct_indexes': <tf.Tensor: shape=(5,), dtype=int32, numpy=array([1, 2, 3, 4, 5], dtype=int32)>, 'correct_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'are', b'you'], dtype=object)>, 'bad_tokens': <tf.Tensor: shape=(4,), dtype=string, numpy=array([b'hi', b'how', b'your?', b':-)'], dtype=object)>, 'simple_float': <tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.1, 0.2, 0.3, 0.4], dtype=float32)>, 'source': <tf.Tensor: shape=(), dtype=string, numpy=b'dummy_source'>, 'unique_id': <tf.Tensor: shape=(), dtype=float32, numpy=1230344.0>}\n"
     ]
    }
   ],
   "source": [
    "# Inspect the first 8 decoded records to verify the schema round-trip.\n",
    "for item in dataset.take(8):\n",
    "    print(item)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
