googcheng committed
Commit
994cc5a
1 Parent(s): 0b0187a

Upload 2 files

Files changed (2)
  1. Untitled.ipynb +183 -0
  2. subwords_tokenizer.ipynb +1769 -0
Untitled.ipynb ADDED
@@ -0,0 +1,183 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "d67610d1-a6d9-420f-90d7-248d46b31697",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stderr",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "loading file tokenizer.model from cache at /root/.cache/huggingface/hub/models--baichuan-inc--Baichuan-13B-Base/snapshots/0ef0739c7bdd34df954003ef76d80f3dabca2ff9/tokenizer.model\n",
14
+ "loading file added_tokens.json from cache at None\n",
15
+ "loading file special_tokens_map.json from cache at /root/.cache/huggingface/hub/models--baichuan-inc--Baichuan-13B-Base/snapshots/0ef0739c7bdd34df954003ef76d80f3dabca2ff9/special_tokens_map.json\n",
16
+ "loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--baichuan-inc--Baichuan-13B-Base/snapshots/0ef0739c7bdd34df954003ef76d80f3dabca2ff9/tokenizer_config.json\n",
17
+ "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--baichuan-inc--Baichuan-13B-Base/snapshots/0ef0739c7bdd34df954003ef76d80f3dabca2ff9/config.json\n",
18
+ "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--baichuan-inc--Baichuan-13B-Base/snapshots/0ef0739c7bdd34df954003ef76d80f3dabca2ff9/config.json\n",
19
+ "Model config BaichuanConfig {\n",
20
+ " \"_from_model_config\": true,\n",
21
+ " \"_name_or_path\": \"baichuan-inc/Baichuan-13B-Base\",\n",
22
+ " \"architectures\": [\n",
23
+ " \"BaichuanForCausalLM\"\n",
24
+ " ],\n",
25
+ " \"auto_map\": {\n",
26
+ " \"AutoConfig\": \"baichuan-inc/Baichuan-13B-Base--configuration_baichuan.BaichuanConfig\",\n",
27
+ " \"AutoModelForCausalLM\": \"baichuan-inc/Baichuan-13B-Base--modeling_baichuan.BaichuanForCausalLM\"\n",
28
+ " },\n",
29
+ " \"bos_token_id\": 1,\n",
30
+ " \"eos_token_id\": 2,\n",
31
+ " \"gradient_checkpointing\": [\n",
32
+ " false\n",
33
+ " ],\n",
34
+ " \"hidden_act\": \"silu\",\n",
35
+ " \"hidden_size\": 5120,\n",
36
+ " \"initializer_range\": 0.02,\n",
37
+ " \"intermediate_size\": 13696,\n",
38
+ " \"model_max_length\": 4096,\n",
39
+ " \"model_type\": \"baichuan\",\n",
40
+ " \"num_attention_heads\": 40,\n",
41
+ " \"num_hidden_layers\": 40,\n",
42
+ " \"pad_token_id\": 0,\n",
43
+ " \"rms_norm_eps\": 1e-06,\n",
44
+ " \"tie_word_embeddings\": false,\n",
45
+ " \"torch_dtype\": \"bfloat16\",\n",
46
+ " \"transformers_version\": \"4.32.1\",\n",
47
+ " \"use_cache\": true,\n",
48
+ " \"vocab_size\": 64000\n",
49
+ "}\n",
50
+ "\n"
51
+ ]
52
+ },
53
+ {
54
+ "name": "stdout",
55
+ "output_type": "stream",
56
+ "text": [
57
+ "[2023-08-31 19:08:00,343] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n"
58
+ ]
59
+ },
60
+ {
61
+ "name": "stderr",
62
+ "output_type": "stream",
63
+ "text": [
64
+ "loading weights file pytorch_model.bin from cache at /root/.cache/huggingface/hub/models--baichuan-inc--Baichuan-13B-Base/snapshots/0ef0739c7bdd34df954003ef76d80f3dabca2ff9/pytorch_model.bin.index.json\n",
65
+ "Generate config GenerationConfig {\n",
66
+ " \"_from_model_config\": true,\n",
67
+ " \"bos_token_id\": 1,\n",
68
+ " \"eos_token_id\": 2,\n",
69
+ " \"pad_token_id\": 0,\n",
70
+ " \"transformers_version\": \"4.32.1\"\n",
71
+ "}\n",
72
+ "\n"
73
+ ]
74
+ },
75
+ {
76
+ "data": {
77
+ "application/vnd.jupyter.widget-view+json": {
78
+ "model_id": "87e88732661e46cebbc8cc1ab0f93a77",
79
+ "version_major": 2,
80
+ "version_minor": 0
81
+ },
82
+ "text/plain": [
83
+ "Loading checkpoint shards: 0%| | 0/3 [00:00<?, ?it/s]"
84
+ ]
85
+ },
86
+ "metadata": {},
87
+ "output_type": "display_data"
88
+ },
89
+ {
90
+ "name": "stderr",
91
+ "output_type": "stream",
92
+ "text": [
93
+ "All model checkpoint weights were used when initializing BaichuanForCausalLM.\n",
94
+ "\n",
95
+ "All the weights of BaichuanForCausalLM were initialized from the model checkpoint at baichuan-inc/Baichuan-13B-Base.\n",
96
+ "If your task is similar to the task the model of the checkpoint was trained on, you can already use BaichuanForCausalLM for predictions without further training.\n",
97
+ "loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--baichuan-inc--Baichuan-13B-Base/snapshots/0ef0739c7bdd34df954003ef76d80f3dabca2ff9/generation_config.json\n",
98
+ "Generate config GenerationConfig {\n",
99
+ " \"_from_model_config\": true,\n",
100
+ " \"bos_token_id\": 1,\n",
101
+ " \"eos_token_id\": 2,\n",
102
+ " \"pad_token_id\": 0,\n",
103
+ " \"transformers_version\": \"4.32.1\"\n",
104
+ "}\n",
105
+ "\n",
106
+ "Generate config GenerationConfig {\n",
107
+ " \"_from_model_config\": true,\n",
108
+ " \"bos_token_id\": 1,\n",
109
+ " \"eos_token_id\": 2,\n",
110
+ " \"pad_token_id\": 0,\n",
111
+ " \"transformers_version\": \"4.32.1\"\n",
112
+ "}\n",
113
+ "\n",
114
+ "/root/miniconda3/lib/python3.8/site-packages/transformers/generation/configuration_utils.py:367: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.92` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.\n",
115
+ " warnings.warn(\n"
116
+ ]
117
+ },
118
+ {
119
+ "name": "stdout",
120
+ "output_type": "stream",
121
+ "text": [
122
+ "begin to gen\n"
123
+ ]
124
+ }
125
+ ],
126
+ "source": [
127
+ "import transformers\n",
128
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
129
+ "\n",
130
+ "transformers.logging.set_verbosity_info()\n",
131
+ "tokenizer = AutoTokenizer.from_pretrained(\"baichuan-inc/Baichuan-13B-Base\", trust_remote_code=True)\n",
132
+ "model = AutoModelForCausalLM.from_pretrained(\"baichuan-inc/Baichuan-13B-Base\", device_map=\"auto\", trust_remote_code=True)\n",
133
+ "inputs = tokenizer('make lyric:想带给你一句话', return_tensors='pt')\n",
134
+ "inputs = inputs.to('cuda:0')\n",
135
+ "print(\"begin to gen\")\n",
136
+ "pred = model.generate(**inputs, max_new_tokens=64,repetition_penalty=1.1, top_p=0.92)\n",
137
+ "print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))\n"
138
+ ]
139
+ },
140
+ {
141
+ "cell_type": "code",
142
+ "execution_count": null,
143
+ "id": "91c204d0-b141-4dea-a468-8f74b1361782",
144
+ "metadata": {},
145
+ "outputs": [],
146
+ "source": [
147
+ "from peft import PeftModel\n",
148
+ "lora_model = PeftModel.from_pretrained(model, \"./autodl-tmp/LLaMA-Efficient-Tuning/pppfuck\")\n",
149
+ "lora_pred = lora_model.generate(**inputs, max_new_tokens=128,repetition_penalty=1.1)\n",
150
+ "print(tokenizer.decode(lora_pred.cpu()[0], skip_special_tokens=True))"
151
+ ]
152
+ },
153
+ {
154
+ "cell_type": "code",
155
+ "execution_count": null,
156
+ "id": "84885258-cc93-4e08-a394-d2c5283a3fb3",
157
+ "metadata": {},
158
+ "outputs": [],
159
+ "source": []
160
+ }
161
+ ],
162
+ "metadata": {
163
+ "kernelspec": {
164
+ "display_name": "Python 3 (ipykernel)",
165
+ "language": "python",
166
+ "name": "python3"
167
+ },
168
+ "language_info": {
169
+ "codemirror_mode": {
170
+ "name": "ipython",
171
+ "version": 3
172
+ },
173
+ "file_extension": ".py",
174
+ "mimetype": "text/x-python",
175
+ "name": "python",
176
+ "nbconvert_exporter": "python",
177
+ "pygments_lexer": "ipython3",
178
+ "version": "3.8.10"
179
+ }
180
+ },
181
+ "nbformat": 4,
182
+ "nbformat_minor": 5
183
+ }
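
For context, Untitled.ipynb loads the Baichuan-13B base model, generates a continuation for a prompt, and then attaches a LoRA adapter with PEFT to compare outputs. The snippet below is a minimal, self-contained sketch of that flow, not code taken from the commit: the adapter path and prompt are placeholders, and do_sample=True is added so that top_p actually takes effect (the captured stderr shows a UserWarning because the notebook sets top_p=0.92 while leaving do_sample at its default of False).

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model and tokenizer (remote code is required for Baichuan).
model_id = "baichuan-inc/Baichuan-13B-Base"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", trust_remote_code=True
)

# Placeholder prompt, mirroring the notebook's usage.
inputs = tokenizer("make lyric:想带给你一句话", return_tensors="pt").to(model.device)

# Base-model generation; do_sample=True is added here so top_p is honored
# (the notebook's log warns that top_p is ignored when do_sample is False).
base_ids = model.generate(
    **inputs, max_new_tokens=64, repetition_penalty=1.1, do_sample=True, top_p=0.92
)
print(tokenizer.decode(base_ids[0], skip_special_tokens=True))

# Attach a LoRA adapter (hypothetical path) and generate again for comparison.
lora_model = PeftModel.from_pretrained(model, "path/to/lora-adapter")
lora_ids = lora_model.generate(**inputs, max_new_tokens=128, repetition_penalty=1.1)
print(tokenizer.decode(lora_ids[0], skip_special_tokens=True))
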
subwords_tokenizer.ipynb ADDED
@@ -0,0 +1,1769 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "s_qNSzzyaCbD"
7
+ },
8
+ "source": [
9
+ "##### Copyright 2019 The TensorFlow Authors."
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 1,
15
+ "metadata": {
16
+ "cellView": "form",
17
+ "execution": {
18
+ "iopub.execute_input": "2023-08-11T11:07:36.887650Z",
19
+ "iopub.status.busy": "2023-08-11T11:07:36.886995Z",
20
+ "iopub.status.idle": "2023-08-11T11:07:36.891012Z",
21
+ "shell.execute_reply": "2023-08-11T11:07:36.890336Z"
22
+ },
23
+ "id": "jmjh290raIky"
24
+ },
25
+ "outputs": [],
26
+ "source": [
27
+ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
28
+ "# you may not use this file except in compliance with the License.\n",
29
+ "# You may obtain a copy of the License at\n",
30
+ "#\n",
31
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
32
+ "#\n",
33
+ "# Unless required by applicable law or agreed to in writing, software\n",
34
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
35
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
36
+ "# See the License for the specific language governing permissions and\n",
37
+ "# limitations under the License."
38
+ ]
39
+ },
40
+ {
41
+ "cell_type": "markdown",
42
+ "metadata": {
43
+ "id": "AOpGoE2T-YXS"
44
+ },
45
+ "source": [
46
+ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n",
47
+ " <td>\n",
48
+ " <a target=\"_blank\" href=\"https://www.tensorflow.org/text/guide/subwords_tokenizer\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n",
49
+ " </td>\n",
50
+ " <td>\n",
51
+ " <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/text/blob/master/docs/guide/subwords_tokenizer.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
52
+ " </td>\n",
53
+ " <td>\n",
54
+ " <a target=\"_blank\" href=\"https://github.com/tensorflow/text/blob/master/docs/guide/subwords_tokenizer.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n",
55
+ " </td>\n",
56
+ " <td>\n",
57
+ " <a href=\"https://storage.googleapis.com/tensorflow_docs/text/docs/guide/subwords_tokenizer.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n",
58
+ " </td>\n",
59
+ "</table>"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "markdown",
64
+ "metadata": {
65
+ "id": "ES8iTKcdPCLt"
66
+ },
67
+ "source": [
68
+ "# Subword tokenizers\n",
69
+ "\n",
70
+ "This tutorial demonstrates how to generate a subword vocabulary from a dataset, and use it to build a `text.BertTokenizer` from the vocabulary.\n",
71
+ "\n",
72
+ "The main advantage of a subword tokenizer is that it interpolates between word-based and character-based tokenization. Common words get a slot in the vocabulary, but the tokenizer can fall back to word pieces and individual characters for unknown words.\n",
73
+ "\n",
74
+ "Objective: At the end of this tutorial you'll have built a complete end-to-end wordpiece tokenizer and detokenizer from scratch, and saved it as a `saved_model` that you can load and use in this [translation tutorial](https://tensorflow.org/text/tutorials/transformer)."
75
+ ]
76
+ },
77
+ {
78
+ "cell_type": "markdown",
79
+ "metadata": {
80
+ "id": "BHfrtG1YPJdR"
81
+ },
82
+ "source": [
83
+ "## Overview"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "markdown",
88
+ "metadata": {
89
+ "id": "iIMuBnQO6ZoV"
90
+ },
91
+ "source": [
92
+ "The `tensorflow_text` package includes TensorFlow implementations of many common tokenizers. This includes three subword-style tokenizers:\n",
93
+ "\n",
94
+ "* `text.BertTokenizer` - The `BertTokenizer` class is a higher level interface. It includes BERT's token splitting algorithm and a `WordPieceTokenizer`. It takes **sentences** as input and returns **token-IDs**.\n",
95
+ "* `text.WordpieceTokenizer` - The `WordPieceTokenizer` class is a lower level interface. It only implements the [WordPiece algorithm](#applying_wordpiece). You must standardize and split the text into words before calling it. It takes **words** as input and returns token-IDs.\n",
96
+ "* `text.SentencepieceTokenizer` - The `SentencepieceTokenizer` requires a more complex setup. Its initializer requires a pre-trained sentencepiece model. See the [google/sentencepiece repository](https://github.com/google/sentencepiece#train-sentencepiece-model) for instructions on how to build one of these models. It can accept **sentences** as input when tokenizing.\n",
97
+ "\n",
98
+ "This tutorial builds a Wordpiece vocabulary in a top down manner, starting from existing words. This process doesn't work for Japanese, Chinese, or Korean since these languages don't have clear multi-character units. To tokenize these languages consider using `text.SentencepieceTokenizer`, `text.UnicodeCharTokenizer` or [this approach](https://tfhub.dev/google/zh_segmentation/1). "
99
+ ]
100
+ },
101
+ {
102
+ "cell_type": "markdown",
103
+ "metadata": {
104
+ "id": "swymtxpl7W7w"
105
+ },
106
+ "source": [
107
+ "## Setup"
108
+ ]
109
+ },
110
+ {
111
+ "cell_type": "code",
112
+ "execution_count": 2,
113
+ "metadata": {
114
+ "execution": {
115
+ "iopub.execute_input": "2023-08-11T11:07:36.894748Z",
116
+ "iopub.status.busy": "2023-08-11T11:07:36.894313Z",
117
+ "iopub.status.idle": "2023-08-11T11:08:05.647249Z",
118
+ "shell.execute_reply": "2023-08-11T11:08:05.646422Z"
119
+ },
120
+ "id": "rJTYbk1E9QOk"
121
+ },
122
+ "outputs": [
123
+ {
124
+ "name": "stdout",
125
+ "output_type": "stream",
126
+ "text": [
127
+ "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\n",
128
+ "tensorflow-datasets 4.9.2 requires protobuf>=3.20, but you have protobuf 3.19.6 which is incompatible.\r\n",
129
+ "tensorflow-metadata 1.14.0 requires protobuf<4.21,>=3.20.3, but you have protobuf 3.19.6 which is incompatible.\u001b[0m\u001b[31m\r\n",
130
+ "\u001b[0m"
131
+ ]
132
+ }
133
+ ],
134
+ "source": [
135
+ "!pip install -q -U \"tensorflow-text==2.11.*\""
136
+ ]
137
+ },
138
+ {
139
+ "cell_type": "code",
140
+ "execution_count": 3,
141
+ "metadata": {
142
+ "execution": {
143
+ "iopub.execute_input": "2023-08-11T11:08:05.651342Z",
144
+ "iopub.status.busy": "2023-08-11T11:08:05.651073Z",
145
+ "iopub.status.idle": "2023-08-11T11:08:08.314047Z",
146
+ "shell.execute_reply": "2023-08-11T11:08:08.313134Z"
147
+ },
148
+ "id": "XFG0NDRu5mYQ"
149
+ },
150
+ "outputs": [
151
+ {
152
+ "name": "stdout",
153
+ "output_type": "stream",
154
+ "text": [
155
+ "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\n",
156
+ "tensorflow 2.11.1 requires protobuf<3.20,>=3.9.2, but you have protobuf 3.20.3 which is incompatible.\u001b[0m\u001b[31m\r\n",
157
+ "\u001b[0m"
158
+ ]
159
+ }
160
+ ],
161
+ "source": [
162
+ "!pip install -q tensorflow_datasets"
163
+ ]
164
+ },
165
+ {
166
+ "cell_type": "code",
167
+ "execution_count": 4,
168
+ "metadata": {
169
+ "execution": {
170
+ "iopub.execute_input": "2023-08-11T11:08:08.317980Z",
171
+ "iopub.status.busy": "2023-08-11T11:08:08.317735Z",
172
+ "iopub.status.idle": "2023-08-11T11:08:11.244419Z",
173
+ "shell.execute_reply": "2023-08-11T11:08:11.243662Z"
174
+ },
175
+ "id": "JjJJyJTZYebt"
176
+ },
177
+ "outputs": [
178
+ {
179
+ "name": "stderr",
180
+ "output_type": "stream",
181
+ "text": [
182
+ "2023-08-11 11:08:10.432347: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory\n",
183
+ "2023-08-11 11:08:10.432451: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory\n",
184
+ "2023-08-11 11:08:10.432460: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n"
185
+ ]
186
+ }
187
+ ],
188
+ "source": [
189
+ "import collections\n",
190
+ "import os\n",
191
+ "import pathlib\n",
192
+ "import re\n",
193
+ "import string\n",
194
+ "import sys\n",
195
+ "import tempfile\n",
196
+ "import time\n",
197
+ "\n",
198
+ "import numpy as np\n",
199
+ "import matplotlib.pyplot as plt\n",
200
+ "\n",
201
+ "import tensorflow_datasets as tfds\n",
202
+ "import tensorflow_text as text\n",
203
+ "import tensorflow as tf"
204
+ ]
205
+ },
206
+ {
207
+ "cell_type": "code",
208
+ "execution_count": 5,
209
+ "metadata": {
210
+ "execution": {
211
+ "iopub.execute_input": "2023-08-11T11:08:11.248812Z",
212
+ "iopub.status.busy": "2023-08-11T11:08:11.248023Z",
213
+ "iopub.status.idle": "2023-08-11T11:08:11.251713Z",
214
+ "shell.execute_reply": "2023-08-11T11:08:11.251084Z"
215
+ },
216
+ "id": "QZi9RstHxO_Z"
217
+ },
218
+ "outputs": [],
219
+ "source": [
220
+ "tf.get_logger().setLevel('ERROR')\n",
221
+ "pwd = pathlib.Path.cwd()"
222
+ ]
223
+ },
224
+ {
225
+ "cell_type": "markdown",
226
+ "metadata": {
227
+ "id": "wzJbGA5N5mXr"
228
+ },
229
+ "source": [
230
+ "## Download the dataset"
231
+ ]
232
+ },
233
+ {
234
+ "cell_type": "markdown",
235
+ "metadata": {
236
+ "id": "kC9TeTd47j8p"
237
+ },
238
+ "source": [
239
+ "Fetch the Portuguese/English translation dataset from [tfds](https://tensorflow.org/datasets):"
240
+ ]
241
+ },
242
+ {
243
+ "cell_type": "code",
244
+ "execution_count": 6,
245
+ "metadata": {
246
+ "execution": {
247
+ "iopub.execute_input": "2023-08-11T11:08:11.255399Z",
248
+ "iopub.status.busy": "2023-08-11T11:08:11.254796Z",
249
+ "iopub.status.idle": "2023-08-11T11:08:16.876922Z",
250
+ "shell.execute_reply": "2023-08-11T11:08:16.876263Z"
251
+ },
252
+ "id": "qDaAOTKHNy8e"
253
+ },
254
+ "outputs": [],
255
+ "source": [
256
+ "examples, metadata = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True,\n",
257
+ " as_supervised=True)\n",
258
+ "train_examples, val_examples = examples['train'], examples['validation'] "
259
+ ]
260
+ },
261
+ {
262
+ "cell_type": "markdown",
263
+ "metadata": {
264
+ "id": "5GHc3O2W8Hgg"
265
+ },
266
+ "source": [
267
+ "This dataset produces Portuguese/English sentence pairs:"
268
+ ]
269
+ },
270
+ {
271
+ "cell_type": "code",
272
+ "execution_count": 7,
273
+ "metadata": {
274
+ "execution": {
275
+ "iopub.execute_input": "2023-08-11T11:08:16.880845Z",
276
+ "iopub.status.busy": "2023-08-11T11:08:16.880611Z",
277
+ "iopub.status.idle": "2023-08-11T11:08:17.469768Z",
278
+ "shell.execute_reply": "2023-08-11T11:08:17.469027Z"
279
+ },
280
+ "id": "-_ezZT8w8GqD"
281
+ },
282
+ "outputs": [
283
+ {
284
+ "name": "stdout",
285
+ "output_type": "stream",
286
+ "text": [
287
+ "Portuguese: e quando melhoramos a procura , tiramos a única vantagem da impressão , que é a serendipidade .\n",
288
+ "English: and when you improve searchability , you actually take away the one advantage of print , which is serendipity .\n"
289
+ ]
290
+ }
291
+ ],
292
+ "source": [
293
+ "for pt, en in train_examples.take(1):\n",
294
+ " print(\"Portuguese: \", pt.numpy().decode('utf-8'))\n",
295
+ " print(\"English: \", en.numpy().decode('utf-8'))"
296
+ ]
297
+ },
298
+ {
299
+ "cell_type": "markdown",
300
+ "metadata": {
301
+ "id": "nNGwm45vKttj"
302
+ },
303
+ "source": [
304
+ "Note a few things about the example sentences above:\n",
305
+ "* They're lower case.\n",
306
+ "* There are spaces around the punctuation.\n",
307
+ "* It's not clear if or what unicode normalization is being used."
308
+ ]
309
+ },
310
+ {
311
+ "cell_type": "code",
312
+ "execution_count": 8,
313
+ "metadata": {
314
+ "execution": {
315
+ "iopub.execute_input": "2023-08-11T11:08:17.473707Z",
316
+ "iopub.status.busy": "2023-08-11T11:08:17.473026Z",
317
+ "iopub.status.idle": "2023-08-11T11:08:17.505169Z",
318
+ "shell.execute_reply": "2023-08-11T11:08:17.504577Z"
319
+ },
320
+ "id": "Pm5Eah5F6B1I"
321
+ },
322
+ "outputs": [],
323
+ "source": [
324
+ "train_en = train_examples.map(lambda pt, en: en)\n",
325
+ "train_pt = train_examples.map(lambda pt, en: pt)"
326
+ ]
327
+ },
328
+ {
329
+ "cell_type": "markdown",
330
+ "metadata": {
331
+ "id": "VCD57yALsF0D"
332
+ },
333
+ "source": [
334
+ "## Generate the vocabulary\n",
335
+ "\n",
336
+ "This section generates a wordpiece vocabulary from a dataset. If you already have a vocabulary file and just want to see how to build a `text.BertTokenizer` or `text.WordpieceTokenizer` tokenizer with it then you can skip ahead to the [Build the tokenizer](#build_the_tokenizer) section."
337
+ ]
338
+ },
339
+ {
340
+ "cell_type": "markdown",
341
+ "metadata": {
342
+ "id": "v4CX7_KlO8lX"
343
+ },
344
+ "source": [
345
+ "Note: The vocabulary generation code used in this tutorial is optimized for **simplicity**. If you need a more scalable solution consider using the Apache Beam implementation available in [tools/wordpiece_vocab/generate_vocab.py](https://github.com/tensorflow/text/blob/master/tensorflow_text/tools/wordpiece_vocab/generate_vocab.py)"
346
+ ]
347
+ },
348
+ {
349
+ "cell_type": "markdown",
350
+ "metadata": {
351
+ "id": "R74W3QabgWmX"
352
+ },
353
+ "source": [
354
+ "The vocabulary generation code is included in the `tensorflow_text` pip package. It is not imported by default , you need to manually import it:"
355
+ ]
356
+ },
357
+ {
358
+ "cell_type": "code",
359
+ "execution_count": 9,
360
+ "metadata": {
361
+ "execution": {
362
+ "iopub.execute_input": "2023-08-11T11:08:17.508754Z",
363
+ "iopub.status.busy": "2023-08-11T11:08:17.508517Z",
364
+ "iopub.status.idle": "2023-08-11T11:08:17.513174Z",
365
+ "shell.execute_reply": "2023-08-11T11:08:17.512549Z"
366
+ },
367
+ "id": "iqX1fYdpnLS2"
368
+ },
369
+ "outputs": [],
370
+ "source": [
371
+ "from tensorflow_text.tools.wordpiece_vocab import bert_vocab_from_dataset as bert_vocab"
372
+ ]
373
+ },
374
+ {
375
+ "cell_type": "markdown",
376
+ "metadata": {
377
+ "id": "HaWSnj8xFgI7"
378
+ },
379
+ "source": [
380
+ "The `bert_vocab.bert_vocab_from_dataset` function will generate the vocabulary. \n",
381
+ "\n",
382
+ "There are many arguments you can set to adjust its behavior. For this tutorial, you'll mostly use the defaults. If you want to learn more about the options, first read about [the algorithm](#algorithm), and then have a look at [the code](https://github.com/tensorflow/text/blob/master/tensorflow_text/tools/wordpiece_vocab/bert_vocab_from_dataset.py).\n"
383
+ ]
384
+ },
385
+ {
386
+ "cell_type": "markdown",
387
+ "metadata": {
388
+ "id": "6gTty2Wh-dHm"
389
+ },
390
+ "source": [
391
+ "This takes about 2 minutes."
392
+ ]
393
+ },
394
+ {
395
+ "cell_type": "code",
396
+ "execution_count": 10,
397
+ "metadata": {
398
+ "execution": {
399
+ "iopub.execute_input": "2023-08-11T11:08:17.516747Z",
400
+ "iopub.status.busy": "2023-08-11T11:08:17.516292Z",
401
+ "iopub.status.idle": "2023-08-11T11:08:17.519968Z",
402
+ "shell.execute_reply": "2023-08-11T11:08:17.519362Z"
403
+ },
404
+ "id": "FwFzYjBy-h8W"
405
+ },
406
+ "outputs": [],
407
+ "source": [
408
+ "bert_tokenizer_params=dict(lower_case=True)\n",
409
+ "reserved_tokens=[\"[PAD]\", \"[UNK]\", \"[START]\", \"[END]\"]\n",
410
+ "\n",
411
+ "bert_vocab_args = dict(\n",
412
+ " # The target vocabulary size\n",
413
+ " vocab_size = 8000,\n",
414
+ " # Reserved tokens that must be included in the vocabulary\n",
415
+ " reserved_tokens=reserved_tokens,\n",
416
+ " # Arguments for `text.BertTokenizer`\n",
417
+ " bert_tokenizer_params=bert_tokenizer_params,\n",
418
+ " # Arguments for `wordpiece_vocab.wordpiece_tokenizer_learner_lib.learn`\n",
419
+ " learn_params={},\n",
420
+ ")"
421
+ ]
422
+ },
423
+ {
424
+ "cell_type": "code",
425
+ "execution_count": 11,
426
+ "metadata": {
427
+ "execution": {
428
+ "iopub.execute_input": "2023-08-11T11:08:17.523306Z",
429
+ "iopub.status.busy": "2023-08-11T11:08:17.522789Z",
430
+ "iopub.status.idle": "2023-08-11T11:09:38.100721Z",
431
+ "shell.execute_reply": "2023-08-11T11:09:38.099954Z"
432
+ },
433
+ "id": "PMN6Lli_3sJW"
434
+ },
435
+ "outputs": [
436
+ {
437
+ "name": "stdout",
438
+ "output_type": "stream",
439
+ "text": [
440
+ "CPU times: user 1min 24s, sys: 2.83 s, total: 1min 27s\n",
441
+ "Wall time: 1min 20s\n"
442
+ ]
443
+ }
444
+ ],
445
+ "source": [
446
+ "%%time\n",
447
+ "pt_vocab = bert_vocab.bert_vocab_from_dataset(\n",
448
+ " train_pt.batch(1000).prefetch(2),\n",
449
+ " **bert_vocab_args\n",
450
+ ")"
451
+ ]
452
+ },
453
+ {
454
+ "cell_type": "markdown",
455
+ "metadata": {
456
+ "id": "3Cl4d2O34gkH"
457
+ },
458
+ "source": [
459
+ "Here are some slices of the resulting vocabulary."
460
+ ]
461
+ },
462
+ {
463
+ "cell_type": "code",
464
+ "execution_count": 12,
465
+ "metadata": {
466
+ "execution": {
467
+ "iopub.execute_input": "2023-08-11T11:09:38.104184Z",
468
+ "iopub.status.busy": "2023-08-11T11:09:38.103928Z",
469
+ "iopub.status.idle": "2023-08-11T11:09:38.108100Z",
470
+ "shell.execute_reply": "2023-08-11T11:09:38.107466Z"
471
+ },
472
+ "id": "mfaPmX54FvhW"
473
+ },
474
+ "outputs": [
475
+ {
476
+ "name": "stdout",
477
+ "output_type": "stream",
478
+ "text": [
479
+ "['[PAD]', '[UNK]', '[START]', '[END]', '!', '#', '$', '%', '&', \"'\"]\n",
480
+ "['no', 'por', 'mais', 'na', 'eu', 'esta', 'muito', 'isso', 'isto', 'sao']\n",
481
+ "['90', 'desse', 'efeito', 'malaria', 'normalmente', 'palestra', 'recentemente', '##nca', 'bons', 'chave']\n",
482
+ "['##–', '##—', '##‘', '##’', '##“', '##”', '##⁄', '##€', '##♪', '##♫']\n"
483
+ ]
484
+ }
485
+ ],
486
+ "source": [
487
+ "print(pt_vocab[:10])\n",
488
+ "print(pt_vocab[100:110])\n",
489
+ "print(pt_vocab[1000:1010])\n",
490
+ "print(pt_vocab[-10:])"
491
+ ]
492
+ },
493
+ {
494
+ "cell_type": "markdown",
495
+ "metadata": {
496
+ "id": "owkP3wbYVQv0"
497
+ },
498
+ "source": [
499
+ "Write a vocabulary file:"
500
+ ]
501
+ },
502
+ {
503
+ "cell_type": "code",
504
+ "execution_count": 13,
505
+ "metadata": {
506
+ "execution": {
507
+ "iopub.execute_input": "2023-08-11T11:09:38.111553Z",
508
+ "iopub.status.busy": "2023-08-11T11:09:38.111046Z",
509
+ "iopub.status.idle": "2023-08-11T11:09:38.114779Z",
510
+ "shell.execute_reply": "2023-08-11T11:09:38.114205Z"
511
+ },
512
+ "id": "VY6v1ThkKDyZ"
513
+ },
514
+ "outputs": [],
515
+ "source": [
516
+ "def write_vocab_file(filepath, vocab):\n",
517
+ " with open(filepath, 'w') as f:\n",
518
+ " for token in vocab:\n",
519
+ " print(token, file=f)"
520
+ ]
521
+ },
522
+ {
523
+ "cell_type": "code",
524
+ "execution_count": 14,
525
+ "metadata": {
526
+ "execution": {
527
+ "iopub.execute_input": "2023-08-11T11:09:38.117753Z",
528
+ "iopub.status.busy": "2023-08-11T11:09:38.117326Z",
529
+ "iopub.status.idle": "2023-08-11T11:09:38.124338Z",
530
+ "shell.execute_reply": "2023-08-11T11:09:38.123750Z"
531
+ },
532
+ "id": "X_TR5U1xWvAV"
533
+ },
534
+ "outputs": [],
535
+ "source": [
536
+ "write_vocab_file('pt_vocab.txt', pt_vocab)"
537
+ ]
538
+ },
539
+ {
540
+ "cell_type": "markdown",
541
+ "metadata": {
542
+ "id": "0ag3qcx54nii"
543
+ },
544
+ "source": [
545
+ "Use that function to generate a vocabulary from the english data:"
546
+ ]
547
+ },
548
+ {
549
+ "cell_type": "code",
550
+ "execution_count": 15,
551
+ "metadata": {
552
+ "execution": {
553
+ "iopub.execute_input": "2023-08-11T11:09:38.127459Z",
554
+ "iopub.status.busy": "2023-08-11T11:09:38.126876Z",
555
+ "iopub.status.idle": "2023-08-11T11:10:33.360225Z",
556
+ "shell.execute_reply": "2023-08-11T11:10:33.359413Z"
557
+ },
558
+ "id": "R3cMumvHWWtl"
559
+ },
560
+ "outputs": [
561
+ {
562
+ "name": "stdout",
563
+ "output_type": "stream",
564
+ "text": [
565
+ "CPU times: user 59.5 s, sys: 2.2 s, total: 1min 1s\n",
566
+ "Wall time: 55.2 s\n"
567
+ ]
568
+ }
569
+ ],
570
+ "source": [
571
+ "%%time\n",
572
+ "en_vocab = bert_vocab.bert_vocab_from_dataset(\n",
573
+ " train_en.batch(1000).prefetch(2),\n",
574
+ " **bert_vocab_args\n",
575
+ ")\n"
576
+ ]
577
+ },
578
+ {
579
+ "cell_type": "code",
580
+ "execution_count": 16,
581
+ "metadata": {
582
+ "execution": {
583
+ "iopub.execute_input": "2023-08-11T11:10:33.363881Z",
584
+ "iopub.status.busy": "2023-08-11T11:10:33.363324Z",
585
+ "iopub.status.idle": "2023-08-11T11:10:33.367548Z",
586
+ "shell.execute_reply": "2023-08-11T11:10:33.366890Z"
587
+ },
588
+ "id": "NxOpzMd8ol5B"
589
+ },
590
+ "outputs": [
591
+ {
592
+ "name": "stdout",
593
+ "output_type": "stream",
594
+ "text": [
595
+ "['[PAD]', '[UNK]', '[START]', '[END]', '!', '#', '$', '%', '&', \"'\"]\n",
596
+ "['as', 'all', 'at', 'one', 'people', 're', 'like', 'if', 'our', 'from']\n",
597
+ "['choose', 'consider', 'extraordinary', 'focus', 'generation', 'killed', 'patterns', 'putting', 'scientific', 'wait']\n",
598
+ "['##_', '##`', '##ย', '##ร', '##อ', '##–', '##—', '##’', '##♪', '##♫']\n"
599
+ ]
600
+ }
601
+ ],
602
+ "source": [
603
+ "print(en_vocab[:10])\n",
604
+ "print(en_vocab[100:110])\n",
605
+ "print(en_vocab[1000:1010])\n",
606
+ "print(en_vocab[-10:])"
607
+ ]
608
+ },
609
+ {
610
+ "cell_type": "markdown",
611
+ "metadata": {
612
+ "id": "ck3LG_f34wCs"
613
+ },
614
+ "source": [
615
+ "Here are the two vocabulary files:"
616
+ ]
617
+ },
618
+ {
619
+ "cell_type": "code",
620
+ "execution_count": 17,
621
+ "metadata": {
622
+ "execution": {
623
+ "iopub.execute_input": "2023-08-11T11:10:33.370842Z",
624
+ "iopub.status.busy": "2023-08-11T11:10:33.370307Z",
625
+ "iopub.status.idle": "2023-08-11T11:10:33.376675Z",
626
+ "shell.execute_reply": "2023-08-11T11:10:33.376105Z"
627
+ },
628
+ "id": "xfc2jxPznM6H"
629
+ },
630
+ "outputs": [],
631
+ "source": [
632
+ "write_vocab_file('en_vocab.txt', en_vocab)"
633
+ ]
634
+ },
635
+ {
636
+ "cell_type": "code",
637
+ "execution_count": 18,
638
+ "metadata": {
639
+ "execution": {
640
+ "iopub.execute_input": "2023-08-11T11:10:33.379857Z",
641
+ "iopub.status.busy": "2023-08-11T11:10:33.379374Z",
642
+ "iopub.status.idle": "2023-08-11T11:10:33.575341Z",
643
+ "shell.execute_reply": "2023-08-11T11:10:33.574189Z"
644
+ },
645
+ "id": "djehfEL6Zn-I"
646
+ },
647
+ "outputs": [
648
+ {
649
+ "name": "stdout",
650
+ "output_type": "stream",
651
+ "text": [
652
+ "en_vocab.txt pt_vocab.txt\r\n"
653
+ ]
654
+ }
655
+ ],
656
+ "source": [
657
+ "!ls *.txt"
658
+ ]
659
+ },
660
+ {
661
+ "cell_type": "markdown",
662
+ "metadata": {
663
+ "id": "Vb5ddYLTBJhk"
664
+ },
665
+ "source": [
666
+ "## Build the tokenizer\n",
667
+ "<a id=\"build_the_tokenizer\"></a>"
668
+ ]
669
+ },
670
+ {
671
+ "cell_type": "markdown",
672
+ "metadata": {
673
+ "id": "_qgp5gvR-2tQ"
674
+ },
675
+ "source": [
676
+ "The `text.BertTokenizer` can be initialized by passing the vocabulary file's path as the first argument (see the section on [tf.lookup](#tf.lookup) for other options): "
677
+ ]
678
+ },
679
+ {
680
+ "cell_type": "code",
681
+ "execution_count": 19,
682
+ "metadata": {
683
+ "execution": {
684
+ "iopub.execute_input": "2023-08-11T11:10:33.579533Z",
685
+ "iopub.status.busy": "2023-08-11T11:10:33.579240Z",
686
+ "iopub.status.idle": "2023-08-11T11:10:33.593614Z",
687
+ "shell.execute_reply": "2023-08-11T11:10:33.592971Z"
688
+ },
689
+ "id": "gdMpt9ZEjVGu"
690
+ },
691
+ "outputs": [],
692
+ "source": [
693
+ "pt_tokenizer = text.BertTokenizer('pt_vocab.txt', **bert_tokenizer_params)\n",
694
+ "en_tokenizer = text.BertTokenizer('en_vocab.txt', **bert_tokenizer_params)"
695
+ ]
696
+ },
697
+ {
698
+ "cell_type": "markdown",
699
+ "metadata": {
700
+ "id": "BhPZafCUds86"
701
+ },
702
+ "source": [
703
+ "Now you can use it to encode some text. Take a batch of 3 examples from the english data:"
704
+ ]
705
+ },
706
+ {
707
+ "cell_type": "code",
708
+ "execution_count": 20,
709
+ "metadata": {
710
+ "execution": {
711
+ "iopub.execute_input": "2023-08-11T11:10:33.597123Z",
712
+ "iopub.status.busy": "2023-08-11T11:10:33.596621Z",
713
+ "iopub.status.idle": "2023-08-11T11:10:33.945079Z",
714
+ "shell.execute_reply": "2023-08-11T11:10:33.944380Z"
715
+ },
716
+ "id": "NKF0QJjtUm9T"
717
+ },
718
+ "outputs": [
719
+ {
720
+ "name": "stdout",
721
+ "output_type": "stream",
722
+ "text": [
723
+ "b'and when you improve searchability , you actually take away the one advantage of print , which is serendipity .'\n",
724
+ "b'but what if it were active ?'\n",
725
+ "b\"but they did n't test for curiosity .\"\n"
726
+ ]
727
+ }
728
+ ],
729
+ "source": [
730
+ "for pt_examples, en_examples in train_examples.batch(3).take(1):\n",
731
+ " for ex in en_examples:\n",
732
+ " print(ex.numpy())"
733
+ ]
734
+ },
735
+ {
736
+ "cell_type": "markdown",
737
+ "metadata": {
738
+ "id": "k9OEIBWopMxW"
739
+ },
740
+ "source": [
741
+ "Run it through the `BertTokenizer.tokenize` method. Initially, this returns a `tf.RaggedTensor` with axes `(batch, word, word-piece)`:"
742
+ ]
743
+ },
744
+ {
745
+ "cell_type": "code",
746
+ "execution_count": 21,
747
+ "metadata": {
748
+ "execution": {
749
+ "iopub.execute_input": "2023-08-11T11:10:33.948777Z",
750
+ "iopub.status.busy": "2023-08-11T11:10:33.948275Z",
751
+ "iopub.status.idle": "2023-08-11T11:10:34.005473Z",
752
+ "shell.execute_reply": "2023-08-11T11:10:34.004898Z"
753
+ },
754
+ "id": "AeTM81lAc8q1"
755
+ },
756
+ "outputs": [
757
+ {
758
+ "name": "stdout",
759
+ "output_type": "stream",
760
+ "text": [
761
+ "[72, 117, 79, 1259, 1491, 2362, 13, 79, 150, 184, 311, 71, 103, 2308, 74, 2679, 13, 148, 80, 55, 4840, 1434, 2423, 540, 15]\n",
762
+ "[87, 90, 107, 76, 129, 1852, 30]\n",
763
+ "[87, 83, 149, 50, 9, 56, 664, 85, 2512, 15]\n"
764
+ ]
765
+ }
766
+ ],
767
+ "source": [
768
+ "# Tokenize the examples -> (batch, word, word-piece)\n",
769
+ "token_batch = en_tokenizer.tokenize(en_examples)\n",
770
+ "# Merge the word and word-piece axes -> (batch, tokens)\n",
771
+ "token_batch = token_batch.merge_dims(-2,-1)\n",
772
+ "\n",
773
+ "for ex in token_batch.to_list():\n",
774
+ " print(ex)"
775
+ ]
776
+ },
777
+ {
778
+ "cell_type": "markdown",
779
+ "metadata": {
780
+ "id": "UbdIaW6kX8hu"
781
+ },
782
+ "source": [
783
+ "If you replace the token IDs with their text representations (using `tf.gather`) you can see that in the first example the words `\"searchability\"` and `\"serendipity\"` have been decomposed into `\"search ##ability\"` and `\"s ##ere ##nd ##ip ##ity\"`:"
784
+ ]
785
+ },
786
+ {
787
+ "cell_type": "code",
788
+ "execution_count": 22,
789
+ "metadata": {
790
+ "execution": {
791
+ "iopub.execute_input": "2023-08-11T11:10:34.008883Z",
792
+ "iopub.status.busy": "2023-08-11T11:10:34.008433Z",
793
+ "iopub.status.idle": "2023-08-11T11:10:34.060789Z",
794
+ "shell.execute_reply": "2023-08-11T11:10:34.060215Z"
795
+ },
796
+ "id": "FA6nKYx5U3Nj"
797
+ },
798
+ "outputs": [
799
+ {
800
+ "data": {
801
+ "text/plain": [
802
+ "<tf.Tensor: shape=(3,), dtype=string, numpy=\n",
803
+ "array([b'and when you improve search ##ability , you actually take away the one advantage of print , which is s ##ere ##nd ##ip ##ity .',\n",
804
+ " b'but what if it were active ?',\n",
805
+ " b\"but they did n ' t test for curiosity .\"], dtype=object)>"
806
+ ]
807
+ },
808
+ "execution_count": 22,
809
+ "metadata": {},
810
+ "output_type": "execute_result"
811
+ }
812
+ ],
813
+ "source": [
814
+ "# Lookup each token id in the vocabulary.\n",
815
+ "txt_tokens = tf.gather(en_vocab, token_batch)\n",
816
+ "# Join with spaces.\n",
817
+ "tf.strings.reduce_join(txt_tokens, separator=' ', axis=-1)"
818
+ ]
819
+ },
820
+ {
821
+ "cell_type": "markdown",
822
+ "metadata": {
823
+ "id": "wY2XrhyRem2O"
824
+ },
825
+ "source": [
826
+ "To re-assemble words from the extracted tokens, use the `BertTokenizer.detokenize` method:"
827
+ ]
828
+ },
829
+ {
830
+ "cell_type": "code",
831
+ "execution_count": 23,
832
+ "metadata": {
833
+ "execution": {
834
+ "iopub.execute_input": "2023-08-11T11:10:34.064135Z",
835
+ "iopub.status.busy": "2023-08-11T11:10:34.063886Z",
836
+ "iopub.status.idle": "2023-08-11T11:10:34.119505Z",
837
+ "shell.execute_reply": "2023-08-11T11:10:34.118918Z"
838
+ },
839
+ "id": "toBXQSrgemRw"
840
+ },
841
+ "outputs": [
842
+ {
843
+ "data": {
844
+ "text/plain": [
845
+ "<tf.Tensor: shape=(3,), dtype=string, numpy=\n",
846
+ "array([b'and when you improve searchability , you actually take away the one advantage of print , which is serendipity .',\n",
847
+ " b'but what if it were active ?',\n",
848
+ " b\"but they did n ' t test for curiosity .\"], dtype=object)>"
849
+ ]
850
+ },
851
+ "execution_count": 23,
852
+ "metadata": {},
853
+ "output_type": "execute_result"
854
+ }
855
+ ],
856
+ "source": [
857
+ "words = en_tokenizer.detokenize(token_batch)\n",
858
+ "tf.strings.reduce_join(words, separator=' ', axis=-1)"
859
+ ]
860
+ },
861
+ {
862
+ "cell_type": "markdown",
863
+ "metadata": {
864
+ "id": "WIZWWy_iueQY"
865
+ },
866
+ "source": [
867
+ "> Note: `BertTokenizer.tokenize`/`BertTokenizer.detokenize` does not round\n",
868
+ "trip losslessly. The result of `detokenize` will not, in general, have the\n",
869
+ "same content or offsets as the input to `tokenize`. This is because of the\n",
870
+ "\"basic tokenization\" step, that splits the strings into words before\n",
871
+ "applying the `WordpieceTokenizer`, includes irreversible\n",
872
+ "steps like lower-casing and splitting on punctuation. `WordpieceTokenizer`\n",
873
+ "on the other hand **is** reversible."
874
+ ]
875
+ },
876
+ {
877
+ "cell_type": "markdown",
878
+ "metadata": {
879
+ "id": "_bN30iCexTPY"
880
+ },
881
+ "source": [
882
+ "## Customization and export\n",
883
+ "\n",
884
+ "This tutorial builds the text tokenizer and detokenizer used by the [Transformer](https://tensorflow.org/text/tutorials/transformer) tutorial. This section adds methods and processing steps to simplify that tutorial, and exports the tokenizers using `tf.saved_model` so they can be imported by the other tutorials."
885
+ ]
886
+ },
887
+ {
888
+ "cell_type": "markdown",
889
+ "metadata": {
890
+ "id": "5wpc7oFkwgni"
891
+ },
892
+ "source": [
893
+ "### Custom tokenization"
894
+ ]
895
+ },
896
+ {
897
+ "cell_type": "markdown",
898
+ "metadata": {
899
+ "id": "NaUR9hHj0PUy"
900
+ },
901
+ "source": [
902
+ "The downstream tutorials both expect the tokenized text to include `[START]` and `[END]` tokens.\n",
903
+ "\n",
904
+ "The `reserved_tokens` reserve space at the beginning of the vocabulary, so `[START]` and `[END]` have the same indexes for both languages:"
905
+ ]
906
+ },
907
+ {
908
+ "cell_type": "code",
909
+ "execution_count": 24,
910
+ "metadata": {
911
+ "execution": {
912
+ "iopub.execute_input": "2023-08-11T11:10:34.123281Z",
913
+ "iopub.status.busy": "2023-08-11T11:10:34.122781Z",
914
+ "iopub.status.idle": "2023-08-11T11:10:34.129983Z",
915
+ "shell.execute_reply": "2023-08-11T11:10:34.129450Z"
916
+ },
917
+ "id": "gyyoa5De0WQu"
918
+ },
919
+ "outputs": [],
920
+ "source": [
921
+ "START = tf.argmax(tf.constant(reserved_tokens) == \"[START]\")\n",
922
+ "END = tf.argmax(tf.constant(reserved_tokens) == \"[END]\")\n",
923
+ "\n",
924
+ "def add_start_end(ragged):\n",
925
+ " count = ragged.bounding_shape()[0]\n",
926
+ " starts = tf.fill([count,1], START)\n",
927
+ " ends = tf.fill([count,1], END)\n",
928
+ " return tf.concat([starts, ragged, ends], axis=1)"
929
+ ]
930
+ },
931
+ {
932
+ "cell_type": "code",
933
+ "execution_count": 25,
934
+ "metadata": {
935
+ "execution": {
936
+ "iopub.execute_input": "2023-08-11T11:10:34.133340Z",
937
+ "iopub.status.busy": "2023-08-11T11:10:34.132753Z",
938
+ "iopub.status.idle": "2023-08-11T11:10:34.188202Z",
939
+ "shell.execute_reply": "2023-08-11T11:10:34.187615Z"
940
+ },
941
+ "id": "MrZjQIwZ6NHu"
942
+ },
943
+ "outputs": [
944
+ {
945
+ "data": {
946
+ "text/plain": [
947
+ "<tf.Tensor: shape=(3,), dtype=string, numpy=\n",
948
+ "array([b'[START] and when you improve searchability , you actually take away the one advantage of print , which is serendipity . [END]',\n",
949
+ " b'[START] but what if it were active ? [END]',\n",
950
+ " b\"[START] but they did n ' t test for curiosity . [END]\"],\n",
951
+ " dtype=object)>"
952
+ ]
953
+ },
954
+ "execution_count": 25,
955
+ "metadata": {},
956
+ "output_type": "execute_result"
957
+ }
958
+ ],
959
+ "source": [
960
+ "words = en_tokenizer.detokenize(add_start_end(token_batch))\n",
961
+ "tf.strings.reduce_join(words, separator=' ', axis=-1)"
962
+ ]
963
+ },
964
+ {
965
+ "cell_type": "markdown",
966
+ "metadata": {
967
+ "id": "WMmHS5VT_suH"
968
+ },
969
+ "source": [
970
+ "### Custom detokenization\n",
971
+ "\n",
972
+ "Before exporting the tokenizers there are a couple of things you can cleanup for the downstream tutorials:\n",
973
+ "\n",
974
+ "1. They want to generate clean text output, so drop reserved tokens like `[START]`, `[END]` and `[PAD]`.\n",
975
+ "2. They're interested in complete strings, so apply a string join along the `words` axis of the result. "
976
+ ]
977
+ },
978
+ {
979
+ "cell_type": "code",
980
+ "execution_count": 26,
981
+ "metadata": {
982
+ "execution": {
983
+ "iopub.execute_input": "2023-08-11T11:10:34.191505Z",
984
+ "iopub.status.busy": "2023-08-11T11:10:34.191007Z",
985
+ "iopub.status.idle": "2023-08-11T11:10:34.195298Z",
986
+ "shell.execute_reply": "2023-08-11T11:10:34.194731Z"
987
+ },
988
+ "id": "x9vXUQPX1ZFA"
989
+ },
990
+ "outputs": [],
991
+ "source": [
992
+ "def cleanup_text(reserved_tokens, token_txt):\n",
993
+ " # Drop the reserved tokens, except for \"[UNK]\".\n",
994
+ " bad_tokens = [re.escape(tok) for tok in reserved_tokens if tok != \"[UNK]\"]\n",
995
+ " bad_token_re = \"|\".join(bad_tokens)\n",
996
+ " \n",
997
+ " bad_cells = tf.strings.regex_full_match(token_txt, bad_token_re)\n",
998
+ " result = tf.ragged.boolean_mask(token_txt, ~bad_cells)\n",
999
+ "\n",
1000
+ " # Join them into strings.\n",
1001
+ " result = tf.strings.reduce_join(result, separator=' ', axis=-1)\n",
1002
+ "\n",
1003
+ " return result"
1004
+ ]
1005
+ },
1006
+ {
1007
+ "cell_type": "code",
1008
+ "execution_count": 27,
1009
+ "metadata": {
1010
+ "execution": {
1011
+ "iopub.execute_input": "2023-08-11T11:10:34.198519Z",
1012
+ "iopub.status.busy": "2023-08-11T11:10:34.198142Z",
1013
+ "iopub.status.idle": "2023-08-11T11:10:34.202222Z",
1014
+ "shell.execute_reply": "2023-08-11T11:10:34.201689Z"
1015
+ },
1016
+ "id": "NMSpZUV7sQYw"
1017
+ },
1018
+ "outputs": [
1019
+ {
1020
+ "data": {
1021
+ "text/plain": [
1022
+ "array([b'and when you improve searchability , you actually take away the one advantage of print , which is serendipity .',\n",
1023
+ " b'but what if it were active ?',\n",
1024
+ " b\"but they did n't test for curiosity .\"], dtype=object)"
1025
+ ]
1026
+ },
1027
+ "execution_count": 27,
1028
+ "metadata": {},
1029
+ "output_type": "execute_result"
1030
+ }
1031
+ ],
1032
+ "source": [
1033
+ "en_examples.numpy()"
1034
+ ]
1035
+ },
1036
+ {
1037
+ "cell_type": "code",
1038
+ "execution_count": 28,
1039
+ "metadata": {
1040
+ "execution": {
1041
+ "iopub.execute_input": "2023-08-11T11:10:34.205335Z",
1042
+ "iopub.status.busy": "2023-08-11T11:10:34.204817Z",
1043
+ "iopub.status.idle": "2023-08-11T11:10:34.243506Z",
1044
+ "shell.execute_reply": "2023-08-11T11:10:34.242904Z"
1045
+ },
1046
+ "id": "yB3MJhNvkuBb"
1047
+ },
1048
+ "outputs": [
1049
+ {
1050
+ "data": {
1051
+ "text/plain": [
1052
+ "<tf.RaggedTensor [[b'and', b'when', b'you', b'improve', b'searchability', b',', b'you',\n",
1053
+ " b'actually', b'take', b'away', b'the', b'one', b'advantage', b'of',\n",
1054
+ " b'print', b',', b'which', b'is', b'serendipity', b'.'] ,\n",
1055
+ " [b'but', b'what', b'if', b'it', b'were', b'active', b'?'],\n",
1056
+ " [b'but', b'they', b'did', b'n', b\"'\", b't', b'test', b'for', b'curiosity',\n",
1057
+ " b'.'] ]>"
1058
+ ]
1059
+ },
1060
+ "execution_count": 28,
1061
+ "metadata": {},
1062
+ "output_type": "execute_result"
1063
+ }
1064
+ ],
1065
+ "source": [
1066
+ "token_batch = en_tokenizer.tokenize(en_examples).merge_dims(-2,-1)\n",
1067
+ "words = en_tokenizer.detokenize(token_batch)\n",
1068
+ "words"
1069
+ ]
1070
+ },
1071
+ {
1072
+ "cell_type": "code",
1073
+ "execution_count": 29,
1074
+ "metadata": {
1075
+ "execution": {
1076
+ "iopub.execute_input": "2023-08-11T11:10:34.246730Z",
1077
+ "iopub.status.busy": "2023-08-11T11:10:34.246329Z",
1078
+ "iopub.status.idle": "2023-08-11T11:10:34.272062Z",
1079
+ "shell.execute_reply": "2023-08-11T11:10:34.271514Z"
1080
+ },
1081
+ "id": "ED5rMeZE6HT3"
1082
+ },
1083
+ "outputs": [
1084
+ {
1085
+ "data": {
1086
+ "text/plain": [
1087
+ "array([b'and when you improve searchability , you actually take away the one advantage of print , which is serendipity .',\n",
1088
+ " b'but what if it were active ?',\n",
1089
+ " b\"but they did n ' t test for curiosity .\"], dtype=object)"
1090
+ ]
1091
+ },
1092
+ "execution_count": 29,
1093
+ "metadata": {},
1094
+ "output_type": "execute_result"
1095
+ }
1096
+ ],
1097
+ "source": [
1098
+ "cleanup_text(reserved_tokens, words).numpy()"
1099
+ ]
1100
+ },
1101
+ {
1102
+ "cell_type": "markdown",
1103
+ "metadata": {
1104
+ "id": "HEfEdRi11Re4"
1105
+ },
1106
+ "source": [
1107
+ "### Export"
1108
+ ]
1109
+ },
1110
+ {
1111
+ "cell_type": "markdown",
1112
+ "metadata": {
1113
+ "id": "uFuo1KZjpEPR"
1114
+ },
1115
+ "source": [
1116
+ "The following code block builds a `CustomTokenizer` class to contain the `text.BertTokenizer` instances, the custom logic, and the `@tf.function` wrappers required for export. "
1117
+ ]
1118
+ },
1119
+ {
1120
+ "cell_type": "code",
1121
+ "execution_count": 30,
1122
+ "metadata": {
1123
+ "execution": {
1124
+ "iopub.execute_input": "2023-08-11T11:10:34.275552Z",
1125
+ "iopub.status.busy": "2023-08-11T11:10:34.274994Z",
1126
+ "iopub.status.idle": "2023-08-11T11:10:34.284520Z",
1127
+ "shell.execute_reply": "2023-08-11T11:10:34.283895Z"
1128
+ },
1129
+ "id": "f1q1hCpH72Vj"
1130
+ },
1131
+ "outputs": [],
1132
+ "source": [
1133
+ "class CustomTokenizer(tf.Module):\n",
1134
+ " def __init__(self, reserved_tokens, vocab_path):\n",
1135
+ " self.tokenizer = text.BertTokenizer(vocab_path, lower_case=True)\n",
1136
+ " self._reserved_tokens = reserved_tokens\n",
1137
+ " self._vocab_path = tf.saved_model.Asset(vocab_path)\n",
1138
+ "\n",
1139
+ " vocab = pathlib.Path(vocab_path).read_text().splitlines()\n",
1140
+ " self.vocab = tf.Variable(vocab)\n",
1141
+ "\n",
1142
+ " ## Create the signatures for export: \n",
1143
+ "\n",
1144
+ " # Include a tokenize signature for a batch of strings. \n",
1145
+ " self.tokenize.get_concrete_function(\n",
1146
+ " tf.TensorSpec(shape=[None], dtype=tf.string))\n",
1147
+ " \n",
1148
+ " # Include `detokenize` and `lookup` signatures for:\n",
1149
+ " # * `Tensors` with shapes [tokens] and [batch, tokens]\n",
1150
+ " # * `RaggedTensors` with shape [batch, tokens]\n",
1151
+ " self.detokenize.get_concrete_function(\n",
1152
+ " tf.TensorSpec(shape=[None, None], dtype=tf.int64))\n",
1153
+ " self.detokenize.get_concrete_function(\n",
1154
+ " tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int64))\n",
1155
+ "\n",
1156
+ " self.lookup.get_concrete_function(\n",
1157
+ " tf.TensorSpec(shape=[None, None], dtype=tf.int64))\n",
1158
+ " self.lookup.get_concrete_function(\n",
1159
+ " tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int64))\n",
1160
+ "\n",
1161
+ " # These `get_*` methods take no arguments\n",
1162
+ " self.get_vocab_size.get_concrete_function()\n",
1163
+ " self.get_vocab_path.get_concrete_function()\n",
1164
+ " self.get_reserved_tokens.get_concrete_function()\n",
1165
+ " \n",
1166
+ " @tf.function\n",
1167
+ " def tokenize(self, strings):\n",
1168
+ " enc = self.tokenizer.tokenize(strings)\n",
1169
+ " # Merge the `word` and `word-piece` axes.\n",
1170
+ " enc = enc.merge_dims(-2,-1)\n",
1171
+ " enc = add_start_end(enc)\n",
1172
+ " return enc\n",
1173
+ "\n",
1174
+ " @tf.function\n",
1175
+ " def detokenize(self, tokenized):\n",
1176
+ " words = self.tokenizer.detokenize(tokenized)\n",
1177
+ " return cleanup_text(self._reserved_tokens, words)\n",
1178
+ "\n",
1179
+ " @tf.function\n",
1180
+ " def lookup(self, token_ids):\n",
1181
+ " return tf.gather(self.vocab, token_ids)\n",
1182
+ "\n",
1183
+ " @tf.function\n",
1184
+ " def get_vocab_size(self):\n",
1185
+ " return tf.shape(self.vocab)[0]\n",
1186
+ "\n",
1187
+ " @tf.function\n",
1188
+ " def get_vocab_path(self):\n",
1189
+ " return self._vocab_path\n",
1190
+ "\n",
1191
+ " @tf.function\n",
1192
+ " def get_reserved_tokens(self):\n",
1193
+ " return tf.constant(self._reserved_tokens)"
1194
+ ]
1195
+ },
1196
+ {
1197
+ "cell_type": "markdown",
1198
+ "metadata": {
1199
+ "id": "RHzEnTQM6nBD"
1200
+ },
1201
+ "source": [
1202
+ "Build a `CustomTokenizer` for each language:"
1203
+ ]
1204
+ },
1205
+ {
1206
+ "cell_type": "code",
1207
+ "execution_count": 31,
1208
+ "metadata": {
1209
+ "execution": {
1210
+ "iopub.execute_input": "2023-08-11T11:10:34.287910Z",
1211
+ "iopub.status.busy": "2023-08-11T11:10:34.287468Z",
1212
+ "iopub.status.idle": "2023-08-11T11:10:36.617150Z",
1213
+ "shell.execute_reply": "2023-08-11T11:10:36.616443Z"
1214
+ },
1215
+ "id": "cU8yFBCSruz4"
1216
+ },
1217
+ "outputs": [],
1218
+ "source": [
1219
+ "tokenizers = tf.Module()\n",
1220
+ "tokenizers.pt = CustomTokenizer(reserved_tokens, 'pt_vocab.txt')\n",
1221
+ "tokenizers.en = CustomTokenizer(reserved_tokens, 'en_vocab.txt')"
1222
+ ]
1223
+ },
1224
+ {
1225
+ "cell_type": "markdown",
1226
+ "metadata": {
1227
+ "id": "ZYfrmDhy6syT"
1228
+ },
1229
+ "source": [
1230
+ "Export the tokenizers as a `saved_model`:"
1231
+ ]
1232
+ },
1233
+ {
1234
+ "cell_type": "code",
1235
+ "execution_count": 32,
1236
+ "metadata": {
1237
+ "execution": {
1238
+ "iopub.execute_input": "2023-08-11T11:10:36.621401Z",
1239
+ "iopub.status.busy": "2023-08-11T11:10:36.620912Z",
1240
+ "iopub.status.idle": "2023-08-11T11:10:38.869823Z",
1241
+ "shell.execute_reply": "2023-08-11T11:10:38.869113Z"
1242
+ },
1243
+ "id": "aieDGooa9ms7"
1244
+ },
1245
+ "outputs": [],
1246
+ "source": [
1247
+ "model_name = 'ted_hrlr_translate_pt_en_converter'\n",
1248
+ "tf.saved_model.save(tokenizers, model_name)"
1249
+ ]
1250
+ },
1251
+ {
1252
+ "cell_type": "markdown",
1253
+ "metadata": {
1254
+ "id": "XoCMz2Fm61v6"
1255
+ },
1256
+ "source": [
1257
+ "Reload the `saved_model` and test the methods:"
1258
+ ]
1259
+ },
1260
+ {
1261
+ "cell_type": "code",
1262
+ "execution_count": 33,
1263
+ "metadata": {
1264
+ "execution": {
1265
+ "iopub.execute_input": "2023-08-11T11:10:38.874346Z",
1266
+ "iopub.status.busy": "2023-08-11T11:10:38.873757Z",
1267
+ "iopub.status.idle": "2023-08-11T11:10:39.621687Z",
1268
+ "shell.execute_reply": "2023-08-11T11:10:39.621100Z"
1269
+ },
1270
+ "id": "9SB_BHwqsHkb"
1271
+ },
1272
+ "outputs": [
1273
+ {
1274
+ "data": {
1275
+ "text/plain": [
1276
+ "7010"
1277
+ ]
1278
+ },
1279
+ "execution_count": 33,
1280
+ "metadata": {},
1281
+ "output_type": "execute_result"
1282
+ }
1283
+ ],
1284
+ "source": [
1285
+ "reloaded_tokenizers = tf.saved_model.load(model_name)\n",
1286
+ "reloaded_tokenizers.en.get_vocab_size().numpy()"
1287
+ ]
1288
+ },
1289
+ {
1290
+ "cell_type": "code",
1291
+ "execution_count": 34,
1292
+ "metadata": {
1293
+ "execution": {
1294
+ "iopub.execute_input": "2023-08-11T11:10:39.625499Z",
1295
+ "iopub.status.busy": "2023-08-11T11:10:39.624852Z",
1296
+ "iopub.status.idle": "2023-08-11T11:10:39.923428Z",
1297
+ "shell.execute_reply": "2023-08-11T11:10:39.922661Z"
1298
+ },
1299
+ "id": "W_Ze3WL3816x"
1300
+ },
1301
+ "outputs": [
1302
+ {
1303
+ "data": {
1304
+ "text/plain": [
1305
+ "array([[ 2, 4006, 2358, 687, 1192, 2365, 4, 3]])"
1306
+ ]
1307
+ },
1308
+ "execution_count": 34,
1309
+ "metadata": {},
1310
+ "output_type": "execute_result"
1311
+ }
1312
+ ],
1313
+ "source": [
1314
+ "tokens = reloaded_tokenizers.en.tokenize(['Hello TensorFlow!'])\n",
1315
+ "tokens.numpy()"
1316
+ ]
1317
+ },
1318
+ {
1319
+ "cell_type": "code",
1320
+ "execution_count": 35,
1321
+ "metadata": {
1322
+ "execution": {
1323
+ "iopub.execute_input": "2023-08-11T11:10:39.927142Z",
1324
+ "iopub.status.busy": "2023-08-11T11:10:39.926503Z",
1325
+ "iopub.status.idle": "2023-08-11T11:10:39.955316Z",
1326
+ "shell.execute_reply": "2023-08-11T11:10:39.954642Z"
1327
+ },
1328
+ "id": "v9o93bzcuhyC"
1329
+ },
1330
+ "outputs": [
1331
+ {
1332
+ "data": {
1333
+ "text/plain": [
1334
+ "<tf.RaggedTensor [[b'[START]', b'hello', b'tens', b'##or', b'##f', b'##low', b'!',\n",
1335
+ " b'[END]']]>"
1336
+ ]
1337
+ },
1338
+ "execution_count": 35,
1339
+ "metadata": {},
1340
+ "output_type": "execute_result"
1341
+ }
1342
+ ],
1343
+ "source": [
1344
+ "text_tokens = reloaded_tokenizers.en.lookup(tokens)\n",
1345
+ "text_tokens"
1346
+ ]
1347
+ },
1348
+ {
1349
+ "cell_type": "code",
1350
+ "execution_count": 36,
1351
+ "metadata": {
1352
+ "execution": {
1353
+ "iopub.execute_input": "2023-08-11T11:10:39.958507Z",
1354
+ "iopub.status.busy": "2023-08-11T11:10:39.958038Z",
1355
+ "iopub.status.idle": "2023-08-11T11:10:40.092706Z",
1356
+ "shell.execute_reply": "2023-08-11T11:10:40.091993Z"
1357
+ },
1358
+ "id": "Y0205N_8dDT5"
1359
+ },
1360
+ "outputs": [
1361
+ {
1362
+ "name": "stdout",
1363
+ "output_type": "stream",
1364
+ "text": [
1365
+ "hello tensorflow !\n"
1366
+ ]
1367
+ }
1368
+ ],
1369
+ "source": [
1370
+ "round_trip = reloaded_tokenizers.en.detokenize(tokens)\n",
1371
+ "\n",
1372
+ "print(round_trip.numpy()[0].decode('utf-8'))"
1373
+ ]
1374
+ },
1375
+ {
1376
+ "cell_type": "markdown",
1377
+ "metadata": {
1378
+ "id": "pSKFDQoBjnNp"
1379
+ },
1380
+ "source": [
1381
+ "Archive it for the [translation tutorials](https://tensorflow.org/text/tutorials/transformer):"
1382
+ ]
1383
+ },
1384
+ {
1385
+ "cell_type": "code",
1386
+ "execution_count": 37,
1387
+ "metadata": {
1388
+ "execution": {
1389
+ "iopub.execute_input": "2023-08-11T11:10:40.096200Z",
1390
+ "iopub.status.busy": "2023-08-11T11:10:40.095722Z",
1391
+ "iopub.status.idle": "2023-08-11T11:10:40.324144Z",
1392
+ "shell.execute_reply": "2023-08-11T11:10:40.323179Z"
1393
+ },
1394
+ "id": "eY0SoE3Yj2it"
1395
+ },
1396
+ "outputs": [
1397
+ {
1398
+ "name": "stdout",
1399
+ "output_type": "stream",
1400
+ "text": [
1401
+ " adding: ted_hrlr_translate_pt_en_converter/ (stored 0%)\r\n",
1402
+ " adding: ted_hrlr_translate_pt_en_converter/variables/ (stored 0%)\r\n",
1403
+ " adding: ted_hrlr_translate_pt_en_converter/variables/variables.data-00000-of-00001 (deflated 51%)\r\n",
1404
+ " adding: ted_hrlr_translate_pt_en_converter/variables/variables.index (deflated 33%)\r\n",
1405
+ " adding: ted_hrlr_translate_pt_en_converter/assets/ (stored 0%)\r\n",
1406
+ " adding: ted_hrlr_translate_pt_en_converter/assets/en_vocab.txt (deflated 54%)\r\n",
1407
+ " adding: ted_hrlr_translate_pt_en_converter/assets/pt_vocab.txt (deflated 57%)\r\n",
1408
+ " adding: ted_hrlr_translate_pt_en_converter/saved_model.pb (deflated 91%)\r\n",
1409
+ " adding: ted_hrlr_translate_pt_en_converter/fingerprint.pb (stored 0%)\r\n"
1410
+ ]
1411
+ }
1412
+ ],
1413
+ "source": [
1414
+ "!zip -r {model_name}.zip {model_name}"
1415
+ ]
1416
+ },
1417
+ {
1418
+ "cell_type": "code",
1419
+ "execution_count": 38,
1420
+ "metadata": {
1421
+ "execution": {
1422
+ "iopub.execute_input": "2023-08-11T11:10:40.328461Z",
1423
+ "iopub.status.busy": "2023-08-11T11:10:40.327855Z",
1424
+ "iopub.status.idle": "2023-08-11T11:10:40.516972Z",
1425
+ "shell.execute_reply": "2023-08-11T11:10:40.516101Z"
1426
+ },
1427
+ "id": "0Synq0RekAXe"
1428
+ },
1429
+ "outputs": [
1430
+ {
1431
+ "name": "stdout",
1432
+ "output_type": "stream",
1433
+ "text": [
1434
+ "168K\tted_hrlr_translate_pt_en_converter.zip\r\n"
1435
+ ]
1436
+ }
1437
+ ],
1438
+ "source": [
1439
+ "!du -h *.zip"
1440
+ ]
1441
+ },
1442
+ {
1443
+ "cell_type": "markdown",
1444
+ "metadata": {
1445
+ "id": "AtmGkGBuGHa2"
1446
+ },
1447
+ "source": [
1448
+ "<a id=\"algorithm\"></a>\n",
1449
+ "\n",
1450
+ "## Optional: The algorithm\n",
1451
+ "\n",
1452
+ "\n",
1453
+ "It's worth noting here that there are two versions of the WordPiece algorithm: Bottom-up and top-down. In both cases goal is the same: \"Given a training corpus and a number of desired\n",
1454
+ "tokens D, the optimization problem is to select D wordpieces such that the resulting corpus is minimal in the\n",
1455
+ "number of wordpieces when segmented according to the chosen wordpiece model.\"\n",
1456
+ "\n",
1457
+ "The original [bottom-up WordPiece algorithm](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf), is based on [byte-pair encoding](https://towardsdatascience.com/byte-pair-encoding-the-dark-horse-of-modern-nlp-eb36c7df4f10). Like BPE, It starts with the alphabet, and iteratively combines common bigrams to form word-pieces and words.\n",
1458
+ "\n",
1459
+ "TensorFlow Text's vocabulary generator follows the top-down implementation from [BERT](https://arxiv.org/pdf/1810.04805.pdf). Starting with words and breaking them down into smaller components until they hit the frequency threshold, or can't be broken down further. The next section describes this in detail. For Japanese, Chinese and Korean this top-down approach doesn't work since there are no explicit word units to start with. For those you need a [different approach](https://tfhub.dev/google/zh_segmentation/1).\n"
1460
+ ]
1461
+ },
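+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For contrast, the next cell is a minimal, purely illustrative sketch of the bottom-up, BPE-style merging idea; it is not TensorFlow Text's implementation, and the `corpus` counts and `num_merges` value are made up."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal sketch of bottom-up, BPE-style merging (illustrative only).\n",
+ "import collections\n",
+ "\n",
+ "corpus = {'hug': 10, 'pug': 5, 'hugs': 5}   # made-up (word, count) pairs\n",
+ "num_merges = 3\n",
+ "\n",
+ "# Start from individual characters.\n",
+ "segmented = {tuple(word): count for word, count in corpus.items()}\n",
+ "\n",
+ "for _ in range(num_merges):\n",
+ "    # Count adjacent symbol pairs (bigrams), weighted by word frequency.\n",
+ "    pairs = collections.Counter()\n",
+ "    for symbols, count in segmented.items():\n",
+ "        for a, b in zip(symbols, symbols[1:]):\n",
+ "            pairs[(a, b)] += count\n",
+ "    if not pairs:\n",
+ "        break\n",
+ "    # Merge the most frequent pair everywhere it occurs.\n",
+ "    best = max(pairs, key=pairs.get)\n",
+ "    merged = {}\n",
+ "    for symbols, count in segmented.items():\n",
+ "        out, i = [], 0\n",
+ "        while i < len(symbols):\n",
+ "            if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == best:\n",
+ "                out.append(symbols[i] + symbols[i + 1])\n",
+ "                i += 2\n",
+ "            else:\n",
+ "                out.append(symbols[i])\n",
+ "                i += 1\n",
+ "        merged[tuple(out)] = count\n",
+ "    segmented = merged\n",
+ "    print(best, '->', ''.join(best))"
+ ]
+ },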
1462
+ {
1463
+ "cell_type": "markdown",
1464
+ "metadata": {
1465
+ "id": "FLA2QhffYEo0"
1466
+ },
1467
+ "source": [
1468
+ "### Choosing the vocabulary\n",
1469
+ "\n",
1470
+ "The top-down WordPiece generation algorithm takes in a set of (word, count) pairs and a threshold `T`, and returns a vocabulary `V`.\n",
1471
+ "\n",
1472
+ "The algorithm is iterative. It is run for `k` iterations, where typically `k = 4`, but only the first two are really important. The third and fourth (and beyond) are just identical to the second. Note that each step of the binary search runs the algorithm from scratch for `k` iterations.\n",
1473
+ "\n",
1474
+ "The iterations described below:\n"
1475
+ ]
1476
+ },
1477
+ {
1478
+ "cell_type": "markdown",
1479
+ "metadata": {
1480
+ "id": "ZqfY0p3PYIKr"
1481
+ },
1482
+ "source": [
1483
+ "#### First iteration\n",
1484
+ "\n",
1485
+ "1. Iterate over every word and count pair in the input, denoted as `(w, c)`.\n",
1486
+ "2. For each word `w`, generate every substring, denoted as `s`. E.g., for the\n",
1487
+ " word `human`, we generate `{h, hu, hum, huma,\n",
1488
+ " human, ##u, ##um, ##uma, ##uman, ##m, ##ma, ##man, #a, ##an, ##n}`.\n",
1489
+ "3. Maintain a substring-to-count hash map, and increment the count of each `s`\n",
1490
+ " by `c`. E.g., if we have `(human, 113)` and `(humas, 3)` in our input, the\n",
1491
+ " count of `s = huma` will be `113+3=116`.\n",
1492
+ "4. Once we've collected the counts of every substring, iterate over the `(s,\n",
1493
+ " c)` pairs *starting with the longest `s` first*.\n",
1494
+ "5. Keep any `s` that has a `c > T`. E.g., if `T = 100` and we have `(pers,\n",
1495
+ " 231); (dogs, 259); (##rint; 76)`, then we would keep `pers` and `dogs`.\n",
1496
+ "6. When an `s` is kept, subtract off its count from all of its prefixes. This\n",
1497
+ " is the reason for sorting all of the `s` by length in step 4. This is a\n",
1498
+ " critical part of the algorithm, because otherwise words would be double\n",
1499
+ " counted. For example, let's say that we've kept `human` and we get to\n",
1500
+ " `(huma, 116)`. We know that `113` of those `116` came from `human`, and `3`\n",
1501
+ " came from `humas`. However, now that `human` is in our vocabulary, we know\n",
1502
+ " we will never segment `human` into `huma ##n`. So once `human` has been\n",
1503
+ " kept, then `huma` only has an *effective* count of `3`.\n",
1504
+ "\n",
1505
+ "This algorithm will generate a set of word pieces `s` (many of which will be\n",
1506
+ "whole words `w`), which we *could* use as our WordPiece vocabulary.\n",
1507
+ "\n",
1508
+ "However, there is a problem: This algorithm will severely overgenerate word\n",
1509
+ "pieces. The reason is that we only subtract off counts of prefix tokens.\n",
1510
+ "Therefore, if we keep the word `human`, we will subtract off the count for `h,\n",
1511
+ "hu, hu, huma`, but not for `##u, ##um, ##uma, ##uman` and so on. So we might\n",
1512
+ "generate both `human` and `##uman` as word pieces, even though `##uman` will\n",
1513
+ "never be applied.\n",
1514
+ "\n",
1515
+ "So why not subtract off the counts for every *substring*, not just every\n",
1516
+ "*prefix*? Because then we could end up subtracting off the counts multiple\n",
1517
+ "times. Let's say that we're processing `s` of length 5 and we keep both\n",
1518
+ "`(##denia, 129)` and `(##eniab, 137)`, where `65` of those counts came from the\n",
1519
+ "word `undeniable`. If we subtract off from *every* substring, we would subtract\n",
1520
+ "`65` from the substring `##enia` twice, even though we should only subtract\n",
1521
+ "once. However, if we only subtract off from prefixes, it will correctly only be\n",
1522
+ "subtracted once."
1523
+ ]
1524
+ },
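+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The first-iteration steps above can be sketched in a few lines of plain Python. This is only a simplified rendering of the description (the `word_counts` and threshold `T` below are made up), not the `tensorflow_text` vocabulary generator itself. Note how `##uman` ends up being kept alongside `human`, which is exactly the overgeneration problem discussed above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Simplified sketch of the first iteration (illustrative only).\n",
+ "import collections\n",
+ "\n",
+ "word_counts = {'human': 113, 'humas': 3, 'dogs': 259}   # made-up (word, count) pairs\n",
+ "T = 100                                                 # made-up threshold\n",
+ "\n",
+ "# Steps 1-3: count every substring; non-initial substrings get a '##' prefix.\n",
+ "counts = collections.Counter()\n",
+ "for word, c in word_counts.items():\n",
+ "    for start in range(len(word)):\n",
+ "        for end in range(start + 1, len(word) + 1):\n",
+ "            piece = word[start:end] if start == 0 else '##' + word[start:end]\n",
+ "            counts[piece] += c\n",
+ "\n",
+ "# Steps 4-6: walk candidates longest first, keep those with count > T, and\n",
+ "# subtract each kept piece's count from its prefixes to avoid double counting.\n",
+ "vocab = []\n",
+ "for piece in sorted(counts, key=len, reverse=True):\n",
+ "    c = counts[piece]   # may already have been reduced by a longer kept piece\n",
+ "    if c <= T:\n",
+ "        continue\n",
+ "    vocab.append(piece)\n",
+ "    marker, body = ('##', piece[2:]) if piece.startswith('##') else ('', piece)\n",
+ "    for i in range(1, len(body)):\n",
+ "        counts[marker + body[:i]] -= c\n",
+ "\n",
+ "sorted(vocab)"
+ ]
+ },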
1525
+ {
1526
+ "cell_type": "markdown",
1527
+ "metadata": {
1528
+ "id": "NNCtKR8xT9wX"
1529
+ },
1530
+ "source": [
1531
+ "#### Second (and third ...) iteration\n",
1532
+ "\n",
1533
+ "To solve the overgeneration issue mentioned above, we perform multiple\n",
1534
+ "iterations of the algorithm.\n",
1535
+ "\n",
1536
+ "Subsequent iterations are identical to the first, with one important\n",
1537
+ "distinction: In step 2, instead of considering *every* substring, we apply the\n",
1538
+ "WordPiece tokenization algorithm using the vocabulary from the previous\n",
1539
+ "iteration, and only consider substrings which *start* on a split point.\n",
1540
+ "\n",
1541
+ "For example, let's say that we're performing step 2 of the algorithm and\n",
1542
+ "encounter the word `undeniable`. In the first iteration, we would consider every\n",
1543
+ "substring, e.g., `{u, un, und, ..., undeniable, ##n, ##nd, ..., ##ndeniable,\n",
1544
+ "...}`.\n",
1545
+ "\n",
1546
+ "Now, for the second iteration, we will only consider a subset of these. Let's\n",
1547
+ "say that after the first iteration, the relevant word pieces are:\n",
1548
+ "\n",
1549
+ "`un, ##deni, ##able, ##ndeni, ##iable`\n",
1550
+ "\n",
1551
+ "The WordPiece algorithm will segment this into `un ##deni ##able` (see the\n",
1552
+ "section [Applying WordPiece](#applying-wordpiece) for more information). In this\n",
1553
+ "case, we will only consider substrings that *start* at a segmentation point. We\n",
1554
+ "will still consider every possible *end* position. So during the second\n",
1555
+ "iteration, the set of `s` for `undeniable` is:\n",
1556
+ "\n",
1557
+ "`{u, un, und, unden, undeni, undenia, undeniab, undeniabl,\n",
1558
+ "undeniable, ##d, ##de, ##den, ##deni, ##denia, ##deniab, ##deniabl\n",
1559
+ ", ##deniable, ##a, ##ab, ##abl, ##able}`\n",
1560
+ "\n",
1561
+ "The algorithm is otherwise identical. In this example, in the first iteration,\n",
1562
+ "the algorithm produces the spurious tokens `##ndeni` and `##iable`. Now, these\n",
1563
+ "tokens are never considered, so they will not be generated by the second\n",
1564
+ "iteration. We perform several iterations just to make sure the results converge\n",
1565
+ "(although there is no literal convergence guarantee).\n"
1566
+ ]
1567
+ },
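+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To make this restriction concrete, the sketch below (an illustration only) generates the candidate substrings for `undeniable` in a later iteration. The segmentation `un ##deni ##able` is hard-coded here; in the real algorithm it would come from applying the previous iteration's vocabulary."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative only: in later iterations, candidate substrings must *start*\n",
+ "# at a split point of the previous iteration's segmentation.\n",
+ "word = 'undeniable'\n",
+ "pieces = ['un', '##deni', '##able']   # segmentation from the previous vocabulary\n",
+ "\n",
+ "# Character offsets where each piece starts.\n",
+ "starts, offset = [], 0\n",
+ "for piece in pieces:\n",
+ "    starts.append(offset)\n",
+ "    offset += len(piece) - 2 if piece.startswith('##') else len(piece)\n",
+ "\n",
+ "# Every possible *end* position is still considered.\n",
+ "candidates = set()\n",
+ "for start in starts:\n",
+ "    for end in range(start + 1, len(word) + 1):\n",
+ "        candidates.add(word[start:end] if start == 0 else '##' + word[start:end])\n",
+ "\n",
+ "sorted(candidates, key=len)"
+ ]
+ },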
1568
+ {
1569
+ "cell_type": "markdown",
1570
+ "metadata": {
1571
+ "id": "AdUkqe84YQA5"
1572
+ },
1573
+ "source": [
1574
+ "### Applying WordPiece\n",
1575
+ "\n",
1576
+ "<a id=\"applying_wordpiece\"></a>\n",
1577
+ "\n",
1578
+ "Once a WordPiece vocabulary has been generated, we need to be able to apply it\n",
1579
+ "to new data. The algorithm is a simple greedy longest-match-first application.\n",
1580
+ "\n",
1581
+ "For example, consider segmenting the word `undeniable`.\n",
1582
+ "\n",
1583
+ "We first lookup `undeniable` in our WordPiece dictionary, and if it's present,\n",
1584
+ "we're done. If not, we decrement the end point by one character, and repeat,\n",
1585
+ "e.g., `undeniabl`.\n",
1586
+ "\n",
1587
+ "Eventually, we will either find a subtoken in our vocabulary, or get down to a\n",
1588
+ "single character subtoken. (In general, we assume that every character is in our\n",
1589
+ "vocabulary, although this might not be the case for rare Unicode characters. If\n",
1590
+ "we encounter a rare Unicode character that's not in the vocabulary we simply map\n",
1591
+ "the entire word to `<unk>`).\n",
1592
+ "\n",
1593
+ "In this case, we find `un` in our vocabulary. So that's our first word piece.\n",
1594
+ "Then we jump to the end of `un` and repeat the processing, e.g., try to find\n",
1595
+ "`##deniable`, then `##deniabl`, etc. This is repeated until we've segmented the\n",
1596
+ "entire word."
1597
+ ]
1598
+ },
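+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The greedy longest-match-first procedure can be written in a few lines of plain Python. The sketch below uses a tiny made-up vocabulary and only mirrors the description above; in this tutorial the real work is done by `text.BertTokenizer` and the exported tokenizers."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Greedy longest-match-first segmentation (illustrative sketch only; the\n",
+ "# vocabulary here is made up and unrelated to the vocab files built above).\n",
+ "def wordpiece_segment(word, vocab, unk_token='<unk>'):\n",
+ "    pieces, start = [], 0\n",
+ "    while start < len(word):\n",
+ "        # Try the longest remaining span first, then shrink it from the right.\n",
+ "        for end in range(len(word), start, -1):\n",
+ "            piece = word[start:end] if start == 0 else '##' + word[start:end]\n",
+ "            if piece in vocab:\n",
+ "                pieces.append(piece)\n",
+ "                start = end   # jump to the end of the match and repeat\n",
+ "                break\n",
+ "        else:\n",
+ "            return [unk_token]   # nothing matched: map the whole word to <unk>\n",
+ "    return pieces\n",
+ "\n",
+ "demo_vocab = {'un', '##deni', '##able', 'u', '##n', '##d'}\n",
+ "wordpiece_segment('undeniable', demo_vocab)"
+ ]
+ },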
1599
+ {
1600
+ "cell_type": "markdown",
1601
+ "metadata": {
1602
+ "id": "rjRQKQzpYMl2"
1603
+ },
1604
+ "source": [
1605
+ "### Intuition\n",
1606
+ "\n",
1607
+ "Intuitively, WordPiece tokenization is trying to satisfy two different\n",
1608
+ "objectives:\n",
1609
+ "\n",
1610
+ "1. Tokenize the data into the *least* number of pieces as possible. It is\n",
1611
+ " important to keep in mind that the WordPiece algorithm does not \"want\" to\n",
1612
+ " split words. Otherwise, it would just split every word into its characters,\n",
1613
+ " e.g., `human -> {h, ##u, ##m, ##a, #n}`. This is one critical thing that\n",
1614
+ " makes WordPiece different from morphological splitters, which will split\n",
1615
+ " linguistic morphemes even for common words (e.g., `unwanted -> {un, want,\n",
1616
+ " ed}`).\n",
1617
+ "\n",
1618
+ "2. When a word does have to be split into pieces, split it into pieces that\n",
1619
+ " have maximal counts in the training data. For example, the reason why the\n",
1620
+ " word `undeniable` would be split into `{un, ##deni, ##able}` rather than\n",
1621
+ " alternatives like `{unde, ##niab, ##le}` is that the counts for `un` and\n",
1622
+ " `##able` in particular will be very high, since these are common prefixes\n",
1623
+ " and suffixes. Even though the count for `##le` must be higher than `##able`,\n",
1624
+ " the low counts of `unde` and `##niab` will make this a less \"desirable\"\n",
1625
+ " tokenization to the algorithm."
1626
+ ]
1627
+ },
1628
+ {
1629
+ "cell_type": "markdown",
1630
+ "metadata": {
1631
+ "id": "KQZ38Uus-Xv1"
1632
+ },
1633
+ "source": [
1634
+ "## Optional: tf.lookup\n",
1635
+ "\n",
1636
+ "<a id=\"tf.lookup\"></a>"
1637
+ ]
1638
+ },
1639
+ {
1640
+ "cell_type": "markdown",
1641
+ "metadata": {
1642
+ "id": "NreDSRmJNG_h"
1643
+ },
1644
+ "source": [
1645
+ "If you need access to, or more control over the vocabulary it's worth noting that you can build the lookup table yourself and pass that to `BertTokenizer`.\n",
1646
+ "\n",
1647
+ "When you pass a string, `BertTokenizer` does the following:"
1648
+ ]
1649
+ },
1650
+ {
1651
+ "cell_type": "code",
1652
+ "execution_count": 39,
1653
+ "metadata": {
1654
+ "execution": {
1655
+ "iopub.execute_input": "2023-08-11T11:10:40.521579Z",
1656
+ "iopub.status.busy": "2023-08-11T11:10:40.521287Z",
1657
+ "iopub.status.idle": "2023-08-11T11:10:40.528923Z",
1658
+ "shell.execute_reply": "2023-08-11T11:10:40.528340Z"
1659
+ },
1660
+ "id": "thAF1DzQOQXl"
1661
+ },
1662
+ "outputs": [],
1663
+ "source": [
1664
+ "pt_lookup = tf.lookup.StaticVocabularyTable(\n",
1665
+ " num_oov_buckets=1,\n",
1666
+ " initializer=tf.lookup.TextFileInitializer(\n",
1667
+ " filename='pt_vocab.txt',\n",
1668
+ " key_dtype=tf.string,\n",
1669
+ " key_index = tf.lookup.TextFileIndex.WHOLE_LINE,\n",
1670
+ " value_dtype = tf.int64,\n",
1671
+ " value_index=tf.lookup.TextFileIndex.LINE_NUMBER)) \n",
1672
+ "pt_tokenizer = text.BertTokenizer(pt_lookup)"
1673
+ ]
1674
+ },
1675
+ {
1676
+ "cell_type": "markdown",
1677
+ "metadata": {
1678
+ "id": "ERY4FYN7O66R"
1679
+ },
1680
+ "source": [
1681
+ "Now you have direct access to the lookup table used in the tokenizer."
1682
+ ]
1683
+ },
1684
+ {
1685
+ "cell_type": "code",
1686
+ "execution_count": 40,
1687
+ "metadata": {
1688
+ "execution": {
1689
+ "iopub.execute_input": "2023-08-11T11:10:40.532362Z",
1690
+ "iopub.status.busy": "2023-08-11T11:10:40.531811Z",
1691
+ "iopub.status.idle": "2023-08-11T11:10:40.539287Z",
1692
+ "shell.execute_reply": "2023-08-11T11:10:40.538647Z"
1693
+ },
1694
+ "id": "337_DcAMOs6N"
1695
+ },
1696
+ "outputs": [
1697
+ {
1698
+ "data": {
1699
+ "text/plain": [
1700
+ "<tf.Tensor: shape=(5,), dtype=int64, numpy=array([7765, 85, 86, 87, 7765])>"
1701
+ ]
1702
+ },
1703
+ "execution_count": 40,
1704
+ "metadata": {},
1705
+ "output_type": "execute_result"
1706
+ }
1707
+ ],
1708
+ "source": [
1709
+ "pt_lookup.lookup(tf.constant(['é', 'um', 'uma', 'para', 'não']))"
1710
+ ]
1711
+ },
1712
+ {
1713
+ "cell_type": "markdown",
1714
+ "metadata": {
1715
+ "id": "BdZ82x5mPDE9"
1716
+ },
1717
+ "source": [
1718
+ "You don't need to use a vocabulary file, `tf.lookup` has other initializer options. If you have the vocabulary in memory you can use `lookup.KeyValueTensorInitializer`:"
1719
+ ]
1720
+ },
1721
+ {
1722
+ "cell_type": "code",
1723
+ "execution_count": 41,
1724
+ "metadata": {
1725
+ "execution": {
1726
+ "iopub.execute_input": "2023-08-11T11:10:40.542840Z",
1727
+ "iopub.status.busy": "2023-08-11T11:10:40.542258Z",
1728
+ "iopub.status.idle": "2023-08-11T11:10:40.555927Z",
1729
+ "shell.execute_reply": "2023-08-11T11:10:40.555329Z"
1730
+ },
1731
+ "id": "mzkrmO9H-b9i"
1732
+ },
1733
+ "outputs": [],
1734
+ "source": [
1735
+ "pt_lookup = tf.lookup.StaticVocabularyTable(\n",
1736
+ " num_oov_buckets=1,\n",
1737
+ " initializer=tf.lookup.KeyValueTensorInitializer(\n",
1738
+ " keys=pt_vocab,\n",
1739
+ " values=tf.range(len(pt_vocab), dtype=tf.int64))) \n",
1740
+ "pt_tokenizer = text.BertTokenizer(pt_lookup)"
1741
+ ]
1742
+ }
1743
+ ],
1744
+ "metadata": {
1745
+ "colab": {
1746
+ "collapsed_sections": [],
1747
+ "name": "subwords_tokenizer.ipynb",
1748
+ "toc_visible": true
1749
+ },
1750
+ "kernelspec": {
1751
+ "display_name": "Python 3",
1752
+ "name": "python3"
1753
+ },
1754
+ "language_info": {
1755
+ "codemirror_mode": {
1756
+ "name": "ipython",
1757
+ "version": 3
1758
+ },
1759
+ "file_extension": ".py",
1760
+ "mimetype": "text/x-python",
1761
+ "name": "python",
1762
+ "nbconvert_exporter": "python",
1763
+ "pygments_lexer": "ipython3",
1764
+ "version": "3.9.17"
1765
+ }
1766
+ },
1767
+ "nbformat": 4,
1768
+ "nbformat_minor": 0
1769
+ }