Recag committed on
Commit
4debd9d
1 Parent(s): dfe9366

Upload tokenizer

Browse files
Tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1504f414204462660029b54dac52411d134f37c71c505aed4fd1e3129246533f
3
+ size 317447
added_tokens.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "<|endoftext|>": 5001,
3
+ "<|startoftext|>": 5000
4
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer.py ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 BharatTech Tech Ecosystem Pvt. Ltd. and the HuggingFace Inc. team. All rights reserved.
3
+
4
+
5
+ """Tokenization classes for Bharatai."""
6
+ import os
7
+ from shutil import copyfile
8
+ from typing import Any, Dict, List, Optional, Tuple
9
+
10
+ import sentencepiece as spm
11
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
12
+ from transformers.utils import logging
13
+
14
+ logger = logging.get_logger(__name__)
15
+
16
# Filename of the SentencePiece model inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "Tokenizer.model"}

# No hub-hosted checkpoints are registered for this tokenizer, hence the
# empty maps; kept for compatibility with the PreTrainedTokenizer API.
PRETRAINED_VOCAB_FILES_MAP = {"vocab_file": {}, "tokenizer_file": {}}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
25
class BharataiTokenizer(PreTrainedTokenizer):
    """
    Construct a Bharatai tokenizer, backed by a SentencePiece model
    (byte-level Byte-Pair-Encoding).

    Args:
        vocab_file (`str`):
            Path to the SentencePiece vocabulary file (``Tokenizer.model``).
        unk_token (`str` or `AddedToken`, *optional*, defaults to `"<unk>"`):
            Token used for out-of-vocabulary pieces.
        bos_token (`str` or `AddedToken`, *optional*, defaults to `"<|startoftext|>"`):
            Beginning-of-sequence token.
        eos_token (`str` or `AddedToken`, *optional*, defaults to `"<|endoftext|>"`):
            End-of-sequence token.
        pad_token (`str` or `AddedToken`, *optional*, defaults to `"<unk>"`):
            Padding token (same surface form as ``unk_token`` by default).
        sp_model_kwargs (`Dict[str, Any]`, *optional*):
            Extra keyword arguments forwarded to
            `sentencepiece.SentencePieceProcessor`.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether `build_inputs_with_special_tokens` prepends the BOS id.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether `build_inputs_with_special_tokens` appends the EOS id.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Passed through to the base class.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        pad_token="<unk>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Normalize plain strings into AddedToken instances so special tokens
        # never have surrounding whitespace stripped during tokenization.
        def _as_added_token(token):
            return AddedToken(token, lstrip=False, rstrip=False) if isinstance(token, str) else token

        bos_token = _as_added_token(bos_token)
        eos_token = _as_added_token(eos_token)
        unk_token = _as_added_token(unk_token)
        pad_token = _as_added_token(pad_token)

        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        # The SentencePiece model must be loaded before super().__init__,
        # which may call get_vocab()/vocab_size during base-class setup.
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    def __getstate__(self):
        # The SentencePieceProcessor is a C++ object and cannot be pickled;
        # drop it and reload from self.vocab_file on unpickling.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    def vocab_size(self):
        """Returns vocab size (number of SentencePiece pieces)."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns the vocabulary as a `{token: id}` dict, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string as a list of SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the SentencePiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # Make sure that special tokens are not decoded using the
            # SentencePiece model: flush the pending pieces, then append the
            # special token verbatim.
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(
        self, save_directory, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                Optional prefix prepended to the vocabulary filename.
        Returns:
            `Tuple[str]`: Paths to the files saved (or `None` if
            `save_directory` is not a directory).
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "")
            + VOCAB_FILES_NAMES["vocab_file"],
        )

        # Prefer copying the original model file; fall back to serializing the
        # in-memory model when the original file is no longer on disk.
        if os.path.abspath(self.vocab_file) != os.path.abspath(
            out_vocab_file
        ) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                fi.write(self.sp_model.serialized_model_proto())

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Concatenate sequences, inserting BOS/EOS ids per the add_*_token flags."""
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. The
        sequence pair mask has the following format:
        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```
        if token_ids_1 is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output

    @property
    def default_chat_template(self):
        """
        Bharatai uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
        Assistant messages do not have special tokens, because Bharatai chat models are generally trained with strict
        user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
        rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
        results in an unusual token ordering when it is present. This template should definitely be changed if you wish
        to fine-tune a model with more flexible role ordering!

        The output should look something like:

        <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
        <bos>[INST] Prompt [/INST]
        """
        logger.warning_once(
            "\nNo chat template is defined for this tokenizer - using the default template "
            f"for the {self.__class__.__name__} class. If the default is not appropriate for "
            "your model, please set `tokenizer.chat_template` to an appropriate template. "
            "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
        )
        template = (
            "{% if messages[0]['role'] == 'system' %}"
            "{% set loop_messages = messages[1:] %}"  # Extract system message if it's present
            "{% set system_message = messages[0]['content'] %}"
            "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
            "{% set loop_messages = messages %}"  # Or use the default system message if the flag is set
            "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
            "{% else %}"
            "{% set loop_messages = messages %}"
            "{% set system_message = false %}"
            "{% endif %}"
            "{% for message in loop_messages %}"  # Loop over all non-system messages
            "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
            "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
            "{% endif %}"
            "{% if loop.index0 == 0 and system_message != false %}"  # Embed system message in first message
            "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
            "{% else %}"
            "{% set content = message['content'] %}"
            "{% endif %}"
            "{% if message['role'] == 'user' %}"  # After all of that, handle messages/roles in a fairly normal way
            "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
            "{% elif message['role'] == 'system' %}"
            "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
            "{% elif message['role'] == 'assistant' %}"
            "{{ ' ' + content.strip() + ' ' + eos_token }}"
            "{% endif %}"
            "{% endfor %}"
        )
        # FIX: the original referenced `self.use_default_system_prompt` (an
        # attribute never set by this class -> AttributeError) and the module
        # constant `DEFAULT_SYSTEM_PROMPT` (defined nowhere in this file ->
        # NameError). Resolve both defensively with safe defaults so the
        # property remains usable; if a caller or subclass defines them, their
        # values are honored.
        use_default_prompt = getattr(self, "use_default_system_prompt", False)
        template = template.replace("USE_DEFAULT_PROMPT", "true" if use_default_prompt else "false")
        default_message = globals().get("DEFAULT_SYSTEM_PROMPT", "").replace("\n", "\\n").replace("'", "\\'")
        template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)

        return template
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "5000": {
14
+ "content": "<|startoftext|>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "5001": {
22
+ "content": "<|endoftext|>",
23
+ "lstrip": false,
24
+ "normalized": true,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "auto_map": {
31
+ "AutoTokenizer": [
32
+ "tokenizer.BharataiTokenizer",
33
+ null
34
+ ]
35
+ },
36
+ "bos_token": "<|startoftext|>",
37
+ "clean_up_tokenization_spaces": false,
38
+ "eos_token": "<|endoftext|>",
39
+ "model_max_length": 1000000000000000019884624838656,
40
+ "pad_token": "<unk>",
41
+ "sp_model_kwargs": {},
42
+ "tokenizer_class": "BharataiTokenizer",
43
+ "unk_token": "<unk>"
44
+ }