Upload tokenizer
- special_tokens_map.json +20 -0
- tokenization_SEA_BPE.py +156 -0
- tokenizer.model +3 -0
- tokenizer_config.json +41 -0
special_tokens_map.json
ADDED
@@ -0,0 +1,20 @@
{
  "additional_special_tokens": [
    "<unk>",
    "<|endoftext|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
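For reference, each entry in this map mirrors the fields of a transformers `AddedToken`. A minimal sketch of the equivalent construction (not part of the uploaded files):

# Sketch: the eos_token and unk_token entries above are equivalent to these AddedToken definitions.
from transformers import AddedToken

eos = AddedToken("<|endoftext|>", lstrip=False, rstrip=False, normalized=False, single_word=False)
unk = AddedToken("<unk>", lstrip=False, rstrip=False, normalized=False, single_word=False)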
tokenization_SEA_BPE.py
ADDED
@@ -0,0 +1,156 @@
import os
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm
from tokenizers import processors
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'tokenizer.model'}
SPIECE_UNDERLINE = '▁'


class SEABPETokenizer(PreTrainedTokenizer):
    """
    Construct the SEA BPE Tokenizer tailored for SEA languages, based on Byte-Pair Encoding with an expanded vocabulary size.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        legacy (`bool`, *optional*, defaults to `True`):
            Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622,
            which includes fixes to properly handle tokens that appear after special tokens.
            Legacy means we are not modifying existing tokenizers without knowing (and we need to manually update those core tokenizers).

            A simple example:

            - `legacy=True`:
            ```python
            >>> from transformers import T5Tokenizer

            >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True)
            >>> tokenizer.encode("Hello <extra_id_0>.")
            [8774, 32099, 3, 5, 1]
            ```
            - `legacy=False`:
            ```python
            >>> from transformers import T5Tokenizer

            >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False)
            >>> tokenizer.encode("Hello <extra_id_0>.")  # the extra space `[3]` is no longer here
            [8774, 32099, 5, 1]
            ```
            Check out the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for
            more details.
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        unk_token='<unk>',
        bos_token=None,
        eos_token='<|endoftext|>',
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=False,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        legacy=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            legacy=legacy,
            **kwargs,
        )
        if legacy is None:
            logger.warning_once(
                f'You are using the default legacy behaviour of the {self.__class__}. This means that tokens that come '
                'after special tokens will not be properly handled. We recommend you to read the related pull request '
                'available at https://github.com/huggingface/transformers/pull/24565, and set the legacy attribute accordingly.'
            )
            legacy = True
        self.legacy = legacy
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Returns vocab size."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def tokenize(self, text, **kwargs) -> List[str]:
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, ' ')
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text):
        """
        Returns a tokenized string.

        Since the SentencePiece internal model always adds a SPIECE_UNDERLINE at the beginning of the provided text,
        we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize`
        function is called with special tokens: the input is split on the special tokens, and each subsequence is
        passed to `_tokenize`. Thus, if a subsequence did not start with a `" "` or SPIECE_UNDERLINE, we have to remove
        the extra `SPIECE_UNDERLINE` prepended.
        """
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and (not is_first) and (not text.startswith(' ')) and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for i, token in enumerate(tokens):
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
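A short usage sketch for the class above, assuming the module and the `tokenizer.model` file sit in the working directory; the sample sentence is illustrative only:

# Minimal sketch: instantiate the tokenizer straight from the SentencePiece model file.
from tokenization_SEA_BPE import SEABPETokenizer

tokenizer = SEABPETokenizer(vocab_file="tokenizer.model", legacy=True)
tokens = tokenizer.tokenize("Selamat pagi")        # SentencePiece pieces, the first typically prefixed with '▁'
ids = tokenizer.convert_tokens_to_ids(tokens)      # map pieces to vocabulary ids
text = tokenizer.convert_tokens_to_string(tokens)  # round-trip back to a plain string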
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0c576972c98fa150efff77f61a30b46afbc1247ff4697f39e51e90d0a8b2190
size 4569957
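Only the Git LFS pointer is shown here. Once the actual model file has been fetched, it can be inspected directly with the sentencepiece library; a sketch assuming the file is available locally:

# Sketch: inspect the raw SentencePiece model after the LFS object has been pulled.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("tokenizer.model")
print(sp.get_piece_size())               # vocabulary size of the SEA BPE model
print(sp.encode("hello", out_type=str))  # raw SentencePiece pieces for a sample string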
tokenizer_config.json
ADDED
@@ -0,0 +1,41 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<unk>",
    "<|endoftext|>"
  ],
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_SEA_BPE.SEABPETokenizer",
      null
    ]
  },
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": null,
  "sp_model_kwargs": {},
  "tokenizer_class": "SEABPETokenizer",
  "unk_token": "<unk>"
}
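The `auto_map` entry routes `AutoTokenizer` to the custom class in `tokenization_SEA_BPE.py`, so loading from the Hub requires `trust_remote_code=True`; the second `auto_map` slot is `null`, so no fast tokenizer is registered and the slow `SEABPETokenizer` is always used. A sketch with a placeholder repository id (not taken from this commit):

# Sketch (repo id is hypothetical): load the uploaded tokenizer through AutoTokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-org/your-repo", trust_remote_code=True)
print(type(tokenizer).__name__)          # SEABPETokenizer
ids = tokenizer("some text").input_ids   # no BOS/EOS appended: add_bos_token and add_eos_token are false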